-rw-r--r--  .gitignore | 9
-rw-r--r--  APACHE-2.0.txt | 202
-rw-r--r--  README | 11
-rw-r--r--  SConstruct | 213
-rw-r--r--  buildscripts/__init__.py | 5
-rw-r--r--  buildscripts/bb.py | 2
-rw-r--r--  buildscripts/cleanbb.py | 43
-rw-r--r--  buildscripts/confluence_export.py | 82
-rw-r--r--  buildscripts/frob_version.py | 69
-rw-r--r--  buildscripts/hacks_mandriva.py | 9
-rw-r--r--  buildscripts/hacks_ubuntu.py | 2
-rw-r--r--  buildscripts/makedist.py | 799
-rw-r--r--  buildscripts/s3md5.py | 48
-rw-r--r--  buildscripts/utils.py | 47
-rw-r--r--  client/clientOnly.cpp | 11
-rw-r--r--  client/connpool.cpp | 29
-rw-r--r--  client/connpool.h | 13
-rw-r--r--  client/dbclient.cpp | 75
-rw-r--r--  client/dbclient.h | 65
-rw-r--r--  client/examples/httpClientTest.cpp | 43
-rw-r--r--  client/parallel.cpp | 29
-rw-r--r--  client/parallel.h | 6
-rw-r--r--  client/syncclusterconnection.cpp | 150
-rw-r--r--  client/syncclusterconnection.h | 55
-rw-r--r--  db/background.h | 56
-rw-r--r--  db/btree.cpp | 95
-rw-r--r--  db/btree.h | 53
-rw-r--r--  db/btreecursor.cpp | 12
-rw-r--r--  db/client.cpp | 193
-rw-r--r--  db/client.h | 165
-rw-r--r--  db/clientcursor.cpp | 21
-rw-r--r--  db/clientcursor.h | 32
-rw-r--r--  db/cloner.cpp | 122
-rw-r--r--  db/cmdline.cpp | 162
-rw-r--r--  db/cmdline.h | 21
-rw-r--r--  db/commands.cpp | 18
-rw-r--r--  db/commands.h | 19
-rw-r--r--  db/common.cpp | 14
-rw-r--r--  db/concurrency.h | 153
-rw-r--r--  db/curop.h | 239
-rw-r--r--  db/cursor.h | 10
-rw-r--r--  db/database.h | 24
-rw-r--r--  db/db.cpp | 239
-rw-r--r--  db/db.h | 147
-rw-r--r--  db/db.sln | 3
-rw-r--r--  db/db.vcproj | 134
-rw-r--r--  db/dbcommands.cpp | 614
-rw-r--r--  db/dbcommands_admin.cpp | 51
-rw-r--r--  db/dbeval.cpp | 10
-rw-r--r--  db/dbmessage.h | 19
-rw-r--r--  db/dbstats.cpp | 43
-rw-r--r--  db/dbstats.h | 44
-rw-r--r--  db/dbwebserver.cpp | 305
-rw-r--r--  db/diskloc.h (renamed from db/storage.h) | 0
-rw-r--r--  db/driverHelpers.cpp | 63
-rw-r--r--  db/extsort.cpp | 45
-rw-r--r--  db/extsort.h | 34
-rw-r--r--  db/flushtest.cpp | 16
-rw-r--r--  db/index.cpp | 193
-rw-r--r--  db/index.h | 151
-rw-r--r--  db/index_geo2d.cpp | 1675
-rw-r--r--  db/instance.cpp | 391
-rw-r--r--  db/instance.h | 66
-rw-r--r--  db/introspect.cpp | 3
-rw-r--r--  db/jsobj.cpp | 239
-rw-r--r--  db/jsobj.h | 275
-rw-r--r--  db/jsobjmanipulator.h | 102
-rw-r--r--  db/json.cpp | 25
-rw-r--r--  db/lasterror.cpp | 8
-rw-r--r--  db/lasterror.h | 8
-rw-r--r--  db/matcher.cpp | 554
-rw-r--r--  db/matcher.h | 76
-rw-r--r--  db/module.cpp | 16
-rw-r--r--  db/modules/mms.cpp | 180
-rw-r--r--  db/mr.cpp | 153
-rw-r--r--  db/namespace.cpp | 81
-rw-r--r--  db/namespace.h | 52
-rw-r--r--  db/nonce.cpp | 4
-rw-r--r--  db/pdfile.cpp | 514
-rw-r--r--  db/pdfile.h | 12
-rw-r--r--  db/query.cpp | 685
-rw-r--r--  db/query.h | 207
-rw-r--r--  db/queryoptimizer.cpp | 103
-rw-r--r--  db/queryoptimizer.h | 27
-rw-r--r--  db/queryutil.cpp | 464
-rw-r--r--  db/queryutil.h | 29
-rw-r--r--  db/rec.h | 16
-rw-r--r--  db/reccache.cpp | 694
-rw-r--r--  db/reccache.h | 28
-rw-r--r--  db/reci.h | 18
-rw-r--r--  db/recstore.h | 232
-rw-r--r--  db/repl.cpp | 513
-rw-r--r--  db/repl.h | 196
-rw-r--r--  db/replset.h | 8
-rw-r--r--  db/scanandorder.h | 2
-rw-r--r--  db/security.cpp | 34
-rw-r--r--  db/security.h | 49
-rw-r--r--  db/security_commands.cpp | 47
-rw-r--r--  db/stats/counters.cpp | 131
-rw-r--r--  db/stats/counters.h | 121
-rw-r--r--  db/stats/snapshots.cpp | 144
-rw-r--r--  db/stats/snapshots.h | 113
-rw-r--r--  db/stats/top.cpp | 181
-rw-r--r--  db/stats/top.h (renamed from util/top.h) | 77
-rw-r--r--  db/storage.cpp | 16
-rw-r--r--  db/update.cpp | 518
-rw-r--r--  db/update.h | 416
-rw-r--r--  dbtests/basictests.cpp | 89
-rw-r--r--  dbtests/btreetests.cpp | 7
-rw-r--r--  dbtests/clienttests.cpp | 74
-rw-r--r--  dbtests/cursortests.cpp | 8
-rw-r--r--  dbtests/dbtests.cpp | 1
-rw-r--r--  dbtests/framework.cpp | 2
-rw-r--r--  dbtests/jsobjtests.cpp | 239
-rw-r--r--  dbtests/jsontests.cpp | 4
-rw-r--r--  dbtests/jstests.cpp | 130
-rw-r--r--  dbtests/namespacetests.cpp | 11
-rw-r--r--  dbtests/pairingtests.cpp | 2
-rw-r--r--  dbtests/pdfiletests.cpp | 8
-rw-r--r--  dbtests/perf/perftest.cpp | 6
-rw-r--r--  dbtests/queryoptimizertests.cpp | 40
-rw-r--r--  dbtests/querytests.cpp | 196
-rw-r--r--  dbtests/repltests.cpp | 112
-rw-r--r--  dbtests/test.vcproj | 78
-rw-r--r--  dbtests/threadedtests.cpp | 24
-rw-r--r--  dbtests/updatetests.cpp | 25
-rw-r--r--  debian/changelog | 30
-rw-r--r--  debian/control | 6
-rw-r--r--  debian/files | 1
-rw-r--r--  debian/init.d | 103
-rw-r--r--  debian/lintian-overrides | 11
-rw-r--r--  debian/mongod.1 | 16
-rw-r--r--  debian/mongodb.conf | 95
-rw-r--r--  debian/mongodb.upstart | 15
-rw-r--r--  debian/mongoimport.1 | 63
-rw-r--r--  debian/mongoimportjson.1 | 45
-rw-r--r--  debian/mongosniff.1 | 30
-rw-r--r--  debian/mongostat.1 | 39
-rw-r--r--  debian/postinst | 10
-rw-r--r--  debian/rules | 18
-rw-r--r--  debian/ubuntu/mongodb.conf | 13
-rw-r--r--  debian/ubuntu/mongodb_settings.conf | 6
-rw-r--r--  doxygenConfig | 2
-rw-r--r--  jstests/array_match1.js | 31
-rw-r--r--  jstests/arrayfind1.js | 6
-rw-r--r--  jstests/arrayfind2.js | 35
-rw-r--r--  jstests/auth/auth1.js | 73
-rw-r--r--  jstests/auth/copyauth.js | 29
-rw-r--r--  jstests/auth1.js | 2
-rw-r--r--  jstests/auth2.js | 5
-rw-r--r--  jstests/capped3.js | 22
-rw-r--r--  jstests/capped5.js | 32
-rw-r--r--  jstests/clone/clonecollection.js | 89
-rw-r--r--  jstests/copydb2.js | 17
-rw-r--r--  jstests/cursor8.js | 23
-rw-r--r--  jstests/dbadmin.js | 7
-rw-r--r--  jstests/dbhash.js | 43
-rw-r--r--  jstests/disk/directoryperdb.js | 62
-rw-r--r--  jstests/disk/diskfull.js | 7
-rw-r--r--  jstests/disk/newcollection.js | 13
-rw-r--r--  jstests/disk/preallocate.js | 10
-rw-r--r--  jstests/disk/repair.js | 18
-rw-r--r--  jstests/drop.js | 2
-rw-r--r--  jstests/dropIndex.js | 16
-rw-r--r--  jstests/exists2.js | 14
-rw-r--r--  jstests/explain2.js | 27
-rw-r--r--  jstests/find6.js | 30
-rw-r--r--  jstests/find7.js | 8
-rw-r--r--  jstests/geo1.js | 41
-rw-r--r--  jstests/geo2.js | 43
-rw-r--r--  jstests/geo3.js | 87
-rw-r--r--  jstests/geo4.js | 10
-rw-r--r--  jstests/geo5.js | 18
-rw-r--r--  jstests/geo6.js | 23
-rw-r--r--  jstests/geo7.js | 20
-rw-r--r--  jstests/geo8.js | 13
-rw-r--r--  jstests/geo9.js | 28
-rw-r--r--  jstests/geo_box1.js | 43
-rw-r--r--  jstests/geo_box2.js | 19
-rw-r--r--  jstests/geo_circle1.js | 50
-rw-r--r--  jstests/geoa.js | 12
-rw-r--r--  jstests/geob.js | 35
-rw-r--r--  jstests/geoc.js | 24
-rw-r--r--  jstests/group2.js | 4
-rw-r--r--  jstests/group3.js | 2
-rw-r--r--  jstests/hint1.js | 4
-rw-r--r--  jstests/in.js | 1
-rw-r--r--  jstests/in3.js | 11
-rw-r--r--  jstests/inc2.js | 2
-rw-r--r--  jstests/index10.js | 8
-rw-r--r--  jstests/index7.js | 26
-rw-r--r--  jstests/index8.js | 13
-rw-r--r--  jstests/index_check2.js | 2
-rw-r--r--  jstests/index_diag.js | 38
-rw-r--r--  jstests/indexg.js | 13
-rw-r--r--  jstests/insert1.js | 41
-rw-r--r--  jstests/json1.js | 4
-rw-r--r--  jstests/mod1.js | 1
-rw-r--r--  jstests/mr5.js | 27
-rw-r--r--  jstests/mr_bigobject.js | 41
-rw-r--r--  jstests/mr_errorhandling.js | 47
-rw-r--r--  jstests/nin.js | 3
-rw-r--r--  jstests/not2.js | 139
-rw-r--r--  jstests/parallel/basic.js | 5
-rw-r--r--  jstests/parallel/basicPlus.js | 6
-rw-r--r--  jstests/parallel/repl.js | 55
-rw-r--r--  jstests/profile1.js | 2
-rw-r--r--  jstests/pullall.js | 2
-rw-r--r--  jstests/regex4.js | 2
-rw-r--r--  jstests/regex5.js | 44
-rw-r--r--  jstests/regex6.js | 15
-rw-r--r--  jstests/regex7.js | 26
-rw-r--r--  jstests/regex8.js | 19
-rw-r--r--  jstests/regex9.js | 11
-rw-r--r--  jstests/regex_embed1.js | 25
-rw-r--r--  jstests/repl/basic1.js | 45
-rw-r--r--  jstests/repl/master1.js | 49
-rw-r--r--  jstests/repl/pair1.js | 1
-rw-r--r--  jstests/repl/pair3.js | 2
-rw-r--r--  jstests/repl/pair4.js | 1
-rw-r--r--  jstests/repl/pair5.js | 2
-rw-r--r--  jstests/repl/pair7.js | 85
-rw-r--r--  jstests/repl/repl10.js | 38
-rw-r--r--  jstests/repl/repl11.js | 59
-rw-r--r--  jstests/repl/repl4.js | 8
-rw-r--r--  jstests/repl/replacePeer1.js | 25
-rw-r--r--  jstests/repl/replacePeer2.js | 31
-rw-r--r--  jstests/repl/snapshot1.js | 34
-rw-r--r--  jstests/repl/snapshot2.js | 50
-rw-r--r--  jstests/repl/snapshot3.js | 50
-rw-r--r--  jstests/run_program1.js | 19
-rw-r--r--  jstests/set5.js | 17
-rw-r--r--  jstests/set6.js | 20
-rw-r--r--  jstests/set7.js | 40
-rw-r--r--  jstests/sharding/findandmodify1.js | 57
-rw-r--r--  jstests/sharding/key_many.js | 15
-rw-r--r--  jstests/sharding/moveshard1.js | 6
-rw-r--r--  jstests/sharding/shard2.js | 2
-rw-r--r--  jstests/sharding/sync1.js | 21
-rw-r--r--  jstests/sharding/sync2.js | 48
-rw-r--r--  jstests/shellkillop.js | 18
-rw-r--r--  jstests/shellspawn.js | 6
-rw-r--r--  jstests/slow/indexbg1.js | 117
-rw-r--r--  jstests/slow/indexbg2.js | 83
-rw-r--r--  jstests/sort5.js | 8
-rw-r--r--  jstests/sort6.js | 38
-rw-r--r--  jstests/storefunc.js | 11
-rw-r--r--  jstests/testminmax.js | 14
-rw-r--r--  jstests/tool/csv1.js | 11
-rw-r--r--  jstests/tool/tool1.js | 4
-rw-r--r--  jstests/type1.js | 1
-rw-r--r--  jstests/unset2.js | 23
-rw-r--r--  jstests/update6.js | 2
-rw-r--r--  jstests/update_addToSet.js | 41
-rw-r--r--  jstests/update_arraymatch1.js | 16
-rw-r--r--  jstests/update_arraymatch2.js | 16
-rw-r--r--  jstests/update_arraymatch3.js | 17
-rw-r--r--  jstests/updatec.js | 14
-rw-r--r--  lib/libboost_thread-gcc41-mt-d-1_34_1.a | bin 0 -> 692920 bytes
-rw-r--r--  mongo.xcodeproj/project.pbxproj | 132
-rw-r--r--  msvc/msvc_scripting.cpp | 16
-rw-r--r--  rpm/init.d-mongod | 45
-rw-r--r--  rpm/mongo.mdv.spec | 143
-rw-r--r--  rpm/mongo.spec | 26
-rw-r--r--  rpm/mongod.conf | 5
-rw-r--r--  rpm/mongod.sysconfig | 1
-rw-r--r--  s/chunk.cpp | 45
-rw-r--r--  s/chunk.h | 40
-rw-r--r--  s/commands_admin.cpp | 10
-rw-r--r--  s/commands_public.cpp | 158
-rw-r--r--  s/config.cpp | 49
-rw-r--r--  s/config.h | 4
-rw-r--r--  s/cursors.cpp | 16
-rw-r--r--  s/cursors.h | 16
-rw-r--r--  s/d_logic.cpp | 11
-rw-r--r--  s/d_logic.h | 16
-rw-r--r--  s/dbgrid.vcproj | 44
-rw-r--r--  s/request.cpp | 8
-rw-r--r--  s/request.h | 18
-rw-r--r--  s/s_only.cpp | 4
-rw-r--r--  s/server.cpp | 132
-rw-r--r--  s/strategy.cpp | 24
-rw-r--r--  s/strategy.h | 16
-rw-r--r--  s/strategy_shard.cpp | 16
-rw-r--r--  s/strategy_single.cpp | 16
-rw-r--r--  s/util.h | 1
-rw-r--r--  scripting/engine.cpp | 34
-rw-r--r--  scripting/engine.h | 14
-rw-r--r--  scripting/engine_spidermonkey.cpp | 92
-rw-r--r--  scripting/engine_spidermonkey.h | 20
-rw-r--r--  scripting/sm_db.cpp | 181
-rw-r--r--  scripting/utils.cpp | 52
-rw-r--r--  scripting/v8_db.cpp | 210
-rw-r--r--  scripting/v8_db.h | 6
-rw-r--r--  scripting/v8_wrapper.cpp | 109
-rw-r--r--  scripting/v8_wrapper.h | 4
-rw-r--r--  shell/collection.js | 72
-rw-r--r--  shell/db.js | 30
-rw-r--r--  shell/dbshell.cpp | 65
-rw-r--r--  shell/mongo.js | 2
-rw-r--r--  shell/mongo_vstudio.cpp | 2795
-rw-r--r--  shell/query.js | 38
-rw-r--r--  shell/servers.js | 150
-rw-r--r--  shell/utils.cpp | 504
-rw-r--r--  shell/utils.h | 23
-rw-r--r--  shell/utils.js | 54
-rw-r--r--  stdafx.cpp | 2
-rw-r--r--  stdafx.h | 1
-rw-r--r--  tools/bridge.cpp | 2
-rw-r--r--  tools/dump.cpp | 4
-rw-r--r--  tools/export.cpp | 4
-rw-r--r--  tools/files.cpp | 2
-rw-r--r--  tools/import.cpp | 59
-rw-r--r--  tools/restore.cpp | 39
-rw-r--r--  tools/sniffer.cpp | 107
-rw-r--r--  tools/stat.cpp | 194
-rw-r--r--  tools/tool.cpp | 410
-rw-r--r--  tools/tool.h | 23
-rw-r--r--  util/allocator.h | 12
-rw-r--r--  util/array.h | 104
-rw-r--r--  util/assert_util.cpp | 46
-rw-r--r--  util/assert_util.h | 37
-rw-r--r--  util/atomic_int.h | 100
-rw-r--r--  util/background.cpp | 4
-rw-r--r--  util/background.h | 3
-rw-r--r--  util/base64.cpp | 39
-rw-r--r--  util/base64.h | 37
-rw-r--r--  util/builder.h | 8
-rw-r--r--  util/debug_util.cpp | 2
-rw-r--r--  util/file_allocator.h | 39
-rw-r--r--  util/goodies.h | 248
-rw-r--r--  util/hashtab.h | 2
-rw-r--r--  util/hex.h | 35
-rw-r--r--  util/httpclient.cpp | 87
-rw-r--r--  util/httpclient.h | 28
-rw-r--r--  util/log.h | 7
-rw-r--r--  util/message.cpp | 42
-rw-r--r--  util/message.h | 34
-rw-r--r--  util/message_server_asio.cpp | 82
-rw-r--r--  util/message_server_port.cpp | 2
-rw-r--r--  util/miniwebserver.cpp | 31
-rw-r--r--  util/miniwebserver.h | 7
-rw-r--r--  util/mmap.cpp | 25
-rw-r--r--  util/mmap.h | 9
-rw-r--r--  util/mmap_mm.cpp | 7
-rw-r--r--  util/mmap_posix.cpp | 14
-rw-r--r--  util/mmap_win.cpp | 26
-rw-r--r--  util/optime.h | 23
-rw-r--r--  util/processinfo.h | 3
-rw-r--r--  util/processinfo_darwin.cpp | 26
-rw-r--r--  util/processinfo_linux2.cpp | 21
-rw-r--r--  util/processinfo_none.cpp | 9
-rw-r--r--  util/processinfo_win32.cpp | 10
-rw-r--r--  util/queue.h | 12
-rw-r--r--  util/sock.cpp | 4
-rw-r--r--  util/sock.h | 10
-rw-r--r--  util/thread_pool.cpp | 10
-rw-r--r--  util/thread_pool.h | 2
-rw-r--r--  util/top.cpp | 18
-rw-r--r--  util/util.cpp | 18
360 files changed, 20297 insertions(+), 6790 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3256d02..0d83f60 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,7 +5,10 @@
*~
*.o
+*.os
+*.obj
*.aps
+*.ilk
*.tar.gz
*.suo
*.ncb
@@ -38,6 +41,7 @@ config.log
settings.py
buildinfo.cpp
tags
+TAGS
#temp dirs
dump
@@ -63,6 +67,7 @@ mongoexport
mongoimport
mongosniff
mongobridge
+mongostat
*.tgz
*.zip
@@ -84,12 +89,14 @@ test
authTest
perftest
clientTest
+httpClientTest
#debian
build-stamp
configure-stamp
debian/mongodb
-debian/mongodb.*
+# This was inserted 2009-08-16, nobody knows why.
+#debian/mongodb.*
#osx
.DS_Store
diff --git a/APACHE-2.0.txt b/APACHE-2.0.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/APACHE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README b/README
index 1fe5926..0341176 100644
--- a/README
+++ b/README
@@ -30,3 +30,14 @@ NOTES
Mongo uses memory mapped files. If built as a 32 bit executable, you will
not be able to work with large (multi-gigabyte) databases. However, 32 bit
builds work fine with small development databases.
+
+
+LICENSING
+
+ Most MongoDB source files are made available under the terms of the
+ GNU Affero General Public License (AGPL). See individual files for
+ details.
+
+ As an exception, the files in the debian/ directory, the rpm/
+ directory, and all subdirectories thereof are made available under
+ the terms of the Apache License, version 2.0.
diff --git a/SConstruct b/SConstruct
index 5233083..8a17e69 100644
--- a/SConstruct
+++ b/SConstruct
@@ -20,6 +20,7 @@ import urllib
import urllib2
import buildscripts
import buildscripts.bb
+from buildscripts import utils
buildscripts.bb.checkOk()
@@ -40,6 +41,14 @@ AddOption('--distname',
metavar='DIR',
help='dist name (0.8.0)')
+AddOption('--distmod',
+ dest='distmod',
+ type='string',
+ nargs=1,
+ action='store',
+ metavar='DIR',
+ help='additional piece for full dist name')
+
AddOption( "--64",
dest="force64",
@@ -154,6 +163,13 @@ AddOption( "--noshell",
action="store",
help="don't build shell" )
+AddOption( "--safeshell",
+ dest="safeshell",
+ type="string",
+ nargs=0,
+ action="store",
+ help="don't let shell scripts run programs (still, don't run untrusted scripts)" )
+
AddOption( "--extrapath",
dest="extrapath",
type="string",
@@ -176,6 +192,20 @@ AddOption( "--extralib",
action="store",
help="comma seperated list of libraries (--extralib js_static,readline" )
+AddOption( "--staticlib",
+ dest="staticlib",
+ type="string",
+ nargs=1,
+ action="store",
+ help="comma seperated list of libs to link statically (--staticlib js_static,boost_program_options-mt,..." )
+
+AddOption( "--staticlibpath",
+ dest="staticlibpath",
+ type="string",
+ nargs=1,
+ action="store",
+ help="comma seperated list of dirs to search for staticlib arguments" )
+
AddOption( "--cxx",
dest="cxx",
type="string",
@@ -208,6 +238,22 @@ AddOption( "--pg",
nargs=0,
action="store" )
+AddOption( "--gdbserver",
+ dest="gdbserver",
+ type="string",
+ nargs=0,
+ action="store" )
+
+AddOption("--nostrip",
+ dest="nostrip",
+ action="store_true",
+ help="do not strip installed binaries")
+
+AddOption("--sharedclient",
+ dest="sharedclient",
+ action="store",
+ help="build a libmongoclient.so/.dll")
+
# --- environment setup ---
def removeIfInList( lst , thing ):
@@ -255,6 +301,8 @@ usejvm = not GetOption( "usejvm" ) is None
asio = not GetOption( "asio" ) is None
+justClientLib = (COMMAND_LINE_TARGETS == ['mongoclient'])
+
env = Environment( MSVS_ARCH=msarch , tools = ["default", "gch"], toolpath = '.' )
if GetOption( "cxx" ) is not None:
env["CC"] = GetOption( "cxx" )
@@ -267,6 +315,8 @@ env.Append( CPPDEFINES=[ "_SCONS" ] )
env.Append( CPPPATH=[ "." ] )
+if GetOption( "safeshell" ) != None:
+ env.Append( CPPDEFINES=[ "MONGO_SAFE_SHELL" ] )
boostCompiler = GetOption( "boostCompiler" )
if boostCompiler is None:
@@ -284,7 +334,7 @@ if ( usesm and usejvm ):
print( "can't say usesm and usejvm at the same time" )
Exit(1)
-if ( not ( usesm or usejvm or usev8 ) ):
+if ( not ( usesm or usejvm or usev8 or justClientLib) ):
usesm = True
extraLibPlaces = []
@@ -309,13 +359,13 @@ if GetOption( "extralib" ) is not None:
# ------ SOURCE FILE SETUP -----------
-commonFiles = Split( "stdafx.cpp buildinfo.cpp db/jsobj.cpp db/json.cpp db/commands.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp shell/mongo.cpp" )
-commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/top.cpp" , "util/message.cpp" ,
+commonFiles = Split( "stdafx.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp db/cmdline.cpp shell/mongo.cpp" )
+commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
"util/assert_util.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/debug_util.cpp",
"util/thread_pool.cpp" ]
commonFiles += Glob( "util/*.c" )
commonFiles += Split( "client/connpool.cpp client/dbclient.cpp client/model.cpp client/parallel.cpp client/syncclusterconnection.cpp" )
-commonFiles += [ "scripting/engine.cpp" ]
+commonFiles += [ "scripting/engine.cpp" , "scripting/utils.cpp" ]
#mmap stuff
@@ -331,12 +381,15 @@ if os.path.exists( "util/processinfo_" + os.sys.platform + ".cpp" ):
else:
commonFiles += [ "util/processinfo_none.cpp" ]
-coreDbFiles = []
+coreDbFiles = [ "db/commands.cpp" ]
coreServerFiles = [ "util/message_server_port.cpp" , "util/message_server_asio.cpp" ]
-serverOnlyFiles = Split( "db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/matcher.cpp db/dbeval.cpp db/dbwebserver.cpp db/dbhelpers.cpp db/instance.cpp db/dbstats.cpp db/database.cpp db/pdfile.cpp db/index.cpp db/cursor.cpp db/security_commands.cpp db/client.cpp db/security.cpp util/miniwebserver.cpp db/storage.cpp db/reccache.cpp db/queryoptimizer.cpp db/extsort.cpp db/mr.cpp s/d_util.cpp" )
+serverOnlyFiles = Split( "db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/matcher.cpp db/dbeval.cpp db/dbwebserver.cpp db/dbhelpers.cpp db/instance.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/client.cpp db/security.cpp util/miniwebserver.cpp db/storage.cpp db/reccache.cpp db/queryoptimizer.cpp db/extsort.cpp db/mr.cpp s/d_util.cpp" )
+serverOnlyFiles += [ "db/index.cpp" ] + Glob( "db/index_*.cpp" )
serverOnlyFiles += Glob( "db/dbcommands*.cpp" )
+serverOnlyFiles += Glob( "db/stats/*.cpp" )
+serverOnlyFiles += [ "db/driverHelpers.cpp" ]
if usesm:
commonFiles += [ "scripting/engine_spidermonkey.cpp" ]
@@ -344,7 +397,7 @@ if usesm:
elif usev8:
commonFiles += [ Glob( "scripting/*v8*.cpp" ) ]
nojni = True
-elif not nojni:
+elif not (nojni or justClientLib) :
commonFiles += [ "scripting/engine_java.cpp" ]
else:
commonFiles += [ "scripting/engine_none.cpp" ]
@@ -415,6 +468,9 @@ def choosePathExist( choices , default=None):
return c
return default
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
if "darwin" == os.sys.platform:
darwin = True
platform = "osx" # prettier than darwin
@@ -437,8 +493,8 @@ if "darwin" == os.sys.platform:
if installDir == DEFAULT_INSTALl_DIR and not distBuild:
installDir = "/usr/64/"
else:
- env.Append( CPPPATH=[ "/sw/include" , "/opt/local/include"] )
- env.Append( LIBPATH=["/sw/lib/", "/opt/local/lib"] )
+ env.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
+ env.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib"]) )
elif "linux2" == os.sys.platform:
linux = True
@@ -494,12 +550,13 @@ elif "win32" == os.sys.platform:
env['ENV'] = dict(os.environ)
def find_boost():
- for bv in reversed( range(33,50) ):
- for extra in ('', '_0', '_1'):
- boostDir = "C:/Program Files/Boost/boost_1_" + str(bv) + extra
- if os.path.exists( boostDir ):
- return boostDir
- return None
+ for x in ('', ' (x86)'):
+ for bv in reversed( range(33,50) ):
+ for extra in ('', '_0', '_1'):
+ boostDir = "C:/Program Files" + x + "/Boost/boost_1_" + str(bv) + extra
+ if os.path.exists( boostDir ):
+ return boostDir
+ return None
boostDir = find_boost()
if boostDir is None:
@@ -515,7 +572,7 @@ elif "win32" == os.sys.platform:
env.Append(CPPPATH=["../js/src/"])
env.Append(LIBPATH=["../js/src"])
env.Append( CPPDEFINES=[ "OLDJS" ] )
- else:
+ elif not justClientLib:
javaHome = findVersion( "C:/Program Files/java/" ,
[ "jdk" , "jdk1.6.0_10" ] )
env.Append( CPPPATH=[ javaHome + "/include" , javaHome + "/include/win32" ] )
@@ -541,6 +598,7 @@ elif "win32" == os.sys.platform:
env.Append( CPPDEFINES=[ "_DEBUG" ] )
env.Append( CPPFLAGS=" /Od /Gm /RTC1 /MDd /ZI " )
env.Append( CPPFLAGS=' /Fd"mongod.pdb" ' )
+ env.Append( LINKFLAGS=" /incremental:yes /debug " )
env.Append( LIBPATH=[ boostDir + "/Lib" ] )
if force64:
@@ -608,7 +666,7 @@ if nix:
env.Append( LIBS=[] )
if debugBuild:
- env.Append( CPPFLAGS=" -O0 -fstack-protector -fstack-check" );
+ env.Append( CPPFLAGS=" -O0 -fstack-protector " );
else:
env.Append( CPPFLAGS=" -O3" )
@@ -628,6 +686,9 @@ if nix:
if GetOption( "profile" ) is not None:
env.Append( LIBS=[ "profiler" ] )
+ if GetOption( "gdbserver" ) is not None:
+ env.Append( CPPDEFINES=["USE_GDBSERVER"] )
+
# pre-compiled headers
if False and 'Gch' in dir( env ):
print( "using precompiled headers" )
@@ -666,9 +727,13 @@ def getGitBranch():
def getGitBranchString( prefix="" , postfix="" ):
t = re.compile( '[/\\\]' ).split( os.getcwd() )
if len(t) > 2 and t[len(t)-1] == "mongo":
- t = re.compile( ".*_([vV]\d+\.\d+)$" ).match( t[len(t)-2] )
- if t is not None:
- return prefix + t.group(1).lower() + postfix
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
b = getGitBranch()
if b == None or b == "master":
@@ -878,6 +943,35 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
myCheckLib( "execinfo", True )
env.Append( LIBS=[ "execinfo" ] )
+ # Handle staticlib,staticlibpath options.
+ staticlibfiles = []
+ if GetOption( "staticlib" ) is not None:
+ # FIXME: probably this loop ought to do something clever
+ # depending on whether we want to use 32bit or 64bit
+ # libraries. For now, we sort of rely on the user supplying a
+ # sensible staticlibpath option. (myCheckLib implements an
+ # analogous search, but it also does other things I don't
+ # understand, so I'm not using it.)
+ if GetOption ( "staticlibpath" ) is not None:
+ dirs = GetOption ( "staticlibpath" ).split( "," )
+ else:
+ dirs = [ "/usr/lib64", "/usr/lib" ]
+
+ for l in GetOption( "staticlib" ).split( "," ):
+ removeIfInList(myenv["LIBS"], l)
+ found = False
+ for d in dirs:
+ f= "%s/lib%s.a" % ( d, l )
+ if os.path.exists( f ):
+ staticlibfiles.append(f)
+ found = True
+ break
+ if not found:
+ raise "can't find a static %s" % l
+
+ myenv.Append(LINKCOM=" $STATICFILES")
+ myenv.Append(STATICFILES=staticlibfiles)
+
return conf.Finish()
env = doConfigure( env )
@@ -990,13 +1084,10 @@ Default( mongod )
# tools
allToolFiles = commonFiles + coreDbFiles + serverOnlyFiles + [ "client/gridfs.cpp", "tools/tool.cpp" ]
-env.Program( "mongodump" , allToolFiles + [ "tools/dump.cpp" ] )
-env.Program( "mongorestore" , allToolFiles + [ "tools/restore.cpp" ] )
-
-env.Program( "mongoexport" , allToolFiles + [ "tools/export.cpp" ] )
-env.Program( "mongoimport" , allToolFiles + [ "tools/import.cpp" ] )
-
-env.Program( "mongofiles" , allToolFiles + [ "tools/files.cpp" ] )
+normalTools = [ "dump" , "restore" , "export" , "import" , "files" , "stat" ]
+env.Alias( "tools" , [ "mongo" + x for x in normalTools ] )
+for x in normalTools:
+ env.Program( "mongo" + x , allToolFiles + [ "tools/" + x + ".cpp" ] )
env.Program( "mongobridge" , allToolFiles + [ "tools/bridge.cpp" ] )
@@ -1005,6 +1096,8 @@ mongos = env.Program( "mongos" , commonFiles + coreDbFiles + coreServerFiles + s
# c++ library
clientLibName = str( env.Library( "mongoclient" , allClientFiles )[0] )
+if GetOption( "sharedclient" ):
+ sharedClientLibName = str( env.SharedLibrary( "mongoclient" , allClientFiles )[0] )
env.Library( "mongotestfiles" , commonFiles + coreDbFiles + serverOnlyFiles + ["client/gridfs.cpp"])
clientTests = []
@@ -1014,6 +1107,7 @@ clientTests += [ clientEnv.Program( "firstExample" , [ "client/examples/first.cp
clientTests += [ clientEnv.Program( "secondExample" , [ "client/examples/second.cpp" ] ) ]
clientTests += [ clientEnv.Program( "whereExample" , [ "client/examples/whereExample.cpp" ] ) ]
clientTests += [ clientEnv.Program( "authTest" , [ "client/examples/authTest.cpp" ] ) ]
+clientTests += [ clientEnv.Program( "httpClientTest" , [ "client/examples/httpClientTest.cpp" ] ) ]
# testing
test = testEnv.Program( "test" , Glob( "dbtests/*.cpp" ) )
@@ -1058,8 +1152,8 @@ elif not onlyServer:
shellEnv["LINKFLAGS"].remove("-m64")
shellEnv["CPPPATH"].remove( "/usr/64/include" )
shellEnv["LIBPATH"].remove( "/usr/64/lib" )
- shellEnv.Append( CPPPATH=[ "/sw/include" , "/opt/local/include"] )
- shellEnv.Append( LIBPATH=[ "/sw/lib/", "/opt/local/lib" , "/usr/lib" ] )
+ shellEnv.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
+ shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib"]) )
l = shellEnv["LIBS"]
if linux64:
@@ -1154,7 +1248,9 @@ def jsSpec( suffix ):
return apply( os.path.join, args )
def jsDirTestSpec( dir ):
- return mongo[0].abspath + " --nodb " + jsSpec( [ dir ] )
+ path = jsSpec( [ dir + '/*.js' ] )
+ paths = [x.abspath for x in Glob( path ) ]
+ return mongo[0].abspath + " --nodb " + ' '.join( paths )
def runShellTest( env, target, source ):
global mongodForTestsPort
@@ -1163,13 +1259,15 @@ def runShellTest( env, target, source ):
if target == "smokeJs":
spec = [ jsSpec( [ "_runner.js" ] ) ]
elif target == "smokeQuota":
- g = Glob( jsSpec( [ "quota" ] ) )
+ g = Glob( jsSpec( [ "quota/*.js" ] ) )
spec = [ x.abspath for x in g ]
elif target == "smokeJsPerf":
- g = Glob( jsSpec( [ "perf" ] ) )
+ g = Glob( jsSpec( [ "perf/*.js" ] ) )
spec = [ x.abspath for x in g ]
elif target == "smokeJsSlow":
spec = [x.abspath for x in Glob(jsSpec(["slow/*"]))]
+ elif target == "smokeParallel":
+ spec = [x.abspath for x in Glob(jsSpec(["parallel/*"]))]
else:
print( "invalid target for runShellTest()" )
Exit( 1 )
@@ -1181,6 +1279,8 @@ if not onlyServer and not noshell:
addSmoketest( "smokeClone", [ "mongo", "mongod" ], [ jsDirTestSpec( "clone" ) ] )
addSmoketest( "smokeRepl", [ "mongo", "mongod", "mongobridge" ], [ jsDirTestSpec( "repl" ) ] )
addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ) ], [ jsDirTestSpec( "disk" ) ] )
+ addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ], [ jsDirTestSpec( "auth" ) ] )
+ addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ], runShellTest )
addSmoketest( "smokeSharding", [ "mongo", "mongod", "mongos" ], [ jsDirTestSpec( "sharding" ) ] )
addSmoketest( "smokeJsPerf", [ "mongo" ], runShellTest )
addSmoketest("smokeJsSlow", [add_exe("mongo")], runShellTest)
@@ -1190,27 +1290,32 @@ if not onlyServer and not noshell:
mongodForTests = None
mongodForTestsPort = "27017"
-def startMongodForTests( env, target, source ):
+def startMongodWithArgs(*args):
global mongodForTests
global mongodForTestsPort
global mongod
if mongodForTests:
return
- mongodForTestsPort = "40000"
+ mongodForTestsPort = "32000"
import os
ensureTestDirs()
dirName = "/data/db/sconsTests/"
ensureDir( dirName )
from subprocess import Popen
- mongodForTests = Popen( [ mongod[0].abspath, "--port", mongodForTestsPort, "--dbpath", dirName, "--nohttpinterface" ] )
- # Wait for mongod to start
- import time
- time.sleep( 5 )
- if mongodForTests.poll() is not None:
+ mongodForTests = Popen([mongod[0].abspath, "--port", mongodForTestsPort,
+ "--dbpath", dirName] + list(args))
+
+ if not utils.didMongodStart( 32000 ):
print( "Failed to start mongod" )
mongodForTests = None
Exit( 1 )
+def startMongodForTests( env, target, source ):
+ return startMongodWithArgs()
+
+def startMongodSmallOplog(env, target, source):
+ return startMongodWithArgs("--master", "--oplogSize", "10")
+
def stopMongodForTests():
global mongodForTests
if not mongodForTests:
@@ -1235,6 +1340,10 @@ testEnv.Alias( "startMongod", [add_exe("mongod")], [startMongodForTests] );
testEnv.AlwaysBuild( "startMongod" );
testEnv.SideEffect( "dummySmokeSideEffect", "startMongod" )
+testEnv.Alias( "startMongodSmallOplog", [add_exe("mongod")], [startMongodSmallOplog] );
+testEnv.AlwaysBuild( "startMongodSmallOplog" );
+testEnv.SideEffect( "dummySmokeSideEffect", "startMongodSmallOplog" )
+
def addMongodReqTargets( env, target, source ):
mongodReqTargets = [ "smokeClient", "smokeJs", "smokeQuota" ]
for target in mongodReqTargets:
@@ -1244,7 +1353,7 @@ def addMongodReqTargets( env, target, source ):
testEnv.Alias( "addMongodReqTargets", [], [addMongodReqTargets] )
testEnv.AlwaysBuild( "addMongodReqTargets" )
-testEnv.Alias( "smokeAll", [ "smoke", "mongosTest", "smokeClone", "smokeRepl", "addMongodReqTargets", "smokeDisk", "smokeSharding", "smokeTool" ] )
+testEnv.Alias( "smokeAll", [ "smoke", "mongosTest", "smokeClone", "smokeRepl", "addMongodReqTargets", "smokeDisk", "smokeAuth", "smokeSharding", "smokeTool" ] )
testEnv.AlwaysBuild( "smokeAll" )
def addMongodReqNoJsTargets( env, target, source ):
@@ -1317,6 +1426,11 @@ def getSystemInstallName():
except:
pass
+
+ dn = GetOption( "distmod" )
+ if dn and len(dn) > 0:
+ n = n + "-" + dn
+
return n
def getCodeVersion():
@@ -1377,7 +1491,7 @@ def installBinary( e , name ):
fullInstallName = installDir + "/bin/" + name
allBinaries += [ name ]
- if solaris or linux:
+ if (solaris or linux) and (not GetOption("nostrip")):
e.AddPostAction( inst, e.Action( 'strip ' + fullInstallName ) )
if linux and len( COMMAND_LINE_TARGETS ) == 1 and str( COMMAND_LINE_TARGETS[0] ) == "s3dist":
@@ -1386,13 +1500,8 @@ def installBinary( e , name ):
if nix:
e.AddPostAction( inst , e.Action( 'chmod 755 ' + fullInstallName ) )
-installBinary( env , "mongodump" )
-installBinary( env , "mongorestore" )
-
-installBinary( env , "mongoexport" )
-installBinary( env , "mongoimport" )
-
-installBinary( env , "mongofiles" )
+for x in normalTools:
+ installBinary( env , "mongo" + x )
if mongosniff_built:
installBinary(env, "mongosniff")
@@ -1537,3 +1646,11 @@ def clean_old_dist_builds(env, target, source):
env.Alias("dist_clean", [], [clean_old_dist_builds])
env.AlwaysBuild("dist_clean")
+
+# --- an uninstall target ---
+if len(COMMAND_LINE_TARGETS) > 0 and 'uninstall' in COMMAND_LINE_TARGETS:
+ SetOption("clean", 1)
+ # By inspection, changing COMMAND_LINE_TARGETS here doesn't do
+ # what we want, but changing BUILD_TARGETS does.
+ BUILD_TARGETS.remove("uninstall")
+ BUILD_TARGETS.append("install")
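
As a quick illustration of the --staticlib handling added in the SConstruct hunks above, here is a minimal standalone Python sketch of the same search (not part of the commit; directory defaults are the ones from the hunk, everything else is illustrative): for each requested library it scans the staticlibpath directories for lib<name>.a and fails if none is found.

    import os

    def find_static_libs(libnames, dirs=("/usr/lib64", "/usr/lib")):
        # Mirror of the SConstruct loop: take the first directory holding
        # lib<name>.a, and error out when a library can't be found anywhere.
        found = []
        for name in libnames.split(","):
            for d in dirs:
                candidate = os.path.join(d, "lib%s.a" % name)
                if os.path.exists(candidate):
                    found.append(candidate)
                    break
            else:
                raise RuntimeError("can't find a static %s" % name)
        return found

    # e.g. find_static_libs("js_static,boost_program_options-mt")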
diff --git a/buildscripts/__init__.py b/buildscripts/__init__.py
index 7f1b703..839da9d 100644
--- a/buildscripts/__init__.py
+++ b/buildscripts/__init__.py
@@ -1,10 +1,13 @@
+import hacks_mandriva
import hacks_ubuntu
import os;
def findHacks( un ):
if un[0] == 'Linux' and (os.path.exists("/etc/debian_version") or
- os.path.exists("/etc/arch-release") or
un[3].find("Ubuntu") >= 0):
return hacks_ubuntu
+ if un[0] == 'Linux' and (os.path.exists("/etc/mandriva-release") or
+ un[3].find("mnb") >= 0):
+ return hacks_mandriva
return None
diff --git a/buildscripts/bb.py b/buildscripts/bb.py
index 1e87828..e1e36f6 100644
--- a/buildscripts/bb.py
+++ b/buildscripts/bb.py
@@ -16,7 +16,7 @@ def checkOk():
print( "excpted version [" + m + "]" )
from subprocess import Popen, PIPE
- diff = Popen( [ "git", "diff", "origin/v1.2" ], stdout=PIPE ).communicate()[ 0 ]
+ diff = Popen( [ "git", "diff", "origin/v1.4" ], stdout=PIPE ).communicate()[ 0 ]
if len(diff) > 0:
print( diff )
raise Exception( "build bot broken?" )
diff --git a/buildscripts/cleanbb.py b/buildscripts/cleanbb.py
new file mode 100644
index 0000000..68a8012
--- /dev/null
+++ b/buildscripts/cleanbb.py
@@ -0,0 +1,43 @@
+
+import sys
+import os
+import utils
+import time
+
+def killprocs( signal="" ):
+ cwd = os.getcwd();
+ if cwd.find("buildscripts" ) > 0 :
+ cwd = cwd.partition( "buildscripts" )[0]
+
+ killed = 0
+
+ for x in utils.getprocesslist():
+ x = x.lstrip()
+ if x.find( cwd ) < 0:
+ continue
+
+ pid = x.partition( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def cleanup( root ):
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ os.remove( dirpath + "/" + x )
+
+ if killprocs() > 0:
+ time.sleep(3)
+ killprocs("-9")
+
+if __name__ == "__main__":
+ root = "/data/db/"
+ if len( sys.argv ) > 1:
+ root = sys.argv[1]
+ cleanup( root )
diff --git a/buildscripts/confluence_export.py b/buildscripts/confluence_export.py
new file mode 100644
index 0000000..956605b
--- /dev/null
+++ b/buildscripts/confluence_export.py
@@ -0,0 +1,82 @@
+#! /usr/bin/env python
+
+# Export the contents on confluence
+#
+# Dependencies:
+# - suds
+#
+# User: soap, Password: soap
+from __future__ import with_statement
+import cookielib
+import datetime
+import os
+import shutil
+import subprocess
+import sys
+import urllib2
+
+from suds.client import Client
+
+SOAP_URI = "http://mongodb.onconfluence.com/rpc/soap-axis/confluenceservice-v1?wsdl"
+USERNAME = "soap"
+PASSWORD = "soap"
+AUTH_URI = "http://www.mongodb.org/login.action?os_authType=basic"
+TMP_DIR = "confluence-tmp"
+TMP_FILE = "confluence-tmp.zip"
+
+
+def export_and_get_uri():
+ client = Client(SOAP_URI)
+ auth = client.service.login(USERNAME, PASSWORD)
+ return client.service.exportSpace(auth, "DOCS", "TYPE_HTML")
+
+
+def login_and_download(docs):
+ cookie_jar = cookielib.CookieJar()
+ cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
+ password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
+ password_manager.add_password(None, AUTH_URI, USERNAME, PASSWORD)
+ auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
+ urllib2.build_opener(cookie_handler, auth_handler).open(AUTH_URI)
+ return urllib2.build_opener(cookie_handler).open(docs)
+
+
+def extract_to_dir(data, dir):
+ with open(TMP_FILE, "w") as f:
+ f.write(data.read())
+ data.close()
+ # This is all really annoying but zipfile doesn't do extraction on 2.5
+ subprocess.call(["unzip", "-d", dir, TMP_FILE])
+ os.unlink(TMP_FILE)
+
+
+def rmdir(dir):
+ try:
+ shutil.rmtree(dir)
+ except:
+ pass
+
+
+def overwrite(src, dest):
+ target = "%s/DOCS-%s/" % (dest, datetime.date.today())
+ current = "%s/current" % dest
+ rmdir(target)
+ shutil.copytree(src, target)
+ try:
+ os.unlink(current)
+ except:
+ pass
+ os.symlink(os.path.abspath(target), os.path.abspath(current))
+
+
+def main(dir):
+ rmdir(TMP_DIR)
+ extract_to_dir(login_and_download(export_and_get_uri()), TMP_DIR)
+ overwrite("%s/DOCS/" % TMP_DIR, dir)
+
+
+if __name__ == "__main__":
+ try:
+ main(sys.argv[1])
+ except IndexError:
+ print "pass outdir as first arg"
diff --git a/buildscripts/frob_version.py b/buildscripts/frob_version.py
new file mode 100644
index 0000000..7b89e0b
--- /dev/null
+++ b/buildscripts/frob_version.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+
+from __future__ import with_statement
+import tempfile
+import sys
+import re
+import os
+
+def opentemp(basename):
+ # The following doesn't work in python before 2.6
+# return tempfile.NamedTemporaryFile('w', -1, ".XXXXXX", basename, '.', False)
+ fname = basename +".TMP"
+ if os.path.exists(fname):
+ raise "not clobbering file %s" % fname
+ return open(fname, 'w')
+
+def frob_debian_changelog(version):
+ fname = 'debian/changelog'
+ with opentemp(fname) as o:
+ with open(fname) as i:
+ lineno = 0
+ for line in i:
+ if lineno == 0:
+ newline = re.sub(r'\([^)]*\)', '('+version+')', line)
+ o.write(newline)
+ else:
+ o.write(line)
+ os.rename(o.name, fname)
+
+def frob_rpm_spec(version):
+ fname = 'rpm/mongo.spec'
+ with opentemp(fname) as o:
+ with open(fname) as i:
+ frobbed = False
+ for line in i:
+ if frobbed:
+ o.write(line)
+ else:
+ if line.find('Version:') == 0:
+ print >> o, 'Version: ' + version
+ frobbed = True
+ else:
+ o.write(line)
+ os.rename(o.name, fname)
+
+def frob_stdafx_cpp(version):
+ fname = 'stdafx.cpp'
+ with opentemp(fname) as o:
+ with open(fname) as i:
+ frobbed = False
+ for line in i:
+ if frobbed:
+ o.write(line)
+ else:
+ if re.search(r'const.*char.*versionString\[\].*=', line):
+ o.write(' const char versionString[] = "%s";' % version)
+ else:
+ o.write(line)
+ os.rename(o.name, fname)
+
+(progname, version) = sys.argv
+if version is None:
+ print >> sys.stderr, 'usage: %s VERSION' % progname
+ sys.exit(1)
+frob_debian_changelog(version)
+frob_rpm_spec(version)
+## I don't yet know what-all cares about the versionString inside the
+## mongo code, so I'm not actually calling this yet.
+# frob_stdafx_cpp(version)
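
All three frob_* helpers above share the same idiom: write the edited copy to a sibling .TMP file, then rename it over the original (a workaround for the pre-2.6 tempfile limitation noted in opentemp). A minimal standalone sketch of the idiom, with a hypothetical helper name (not part of the commit):

    import os

    def rewrite_first_line(fname, new_first_line):
        # Write the edited copy next to the original, then replace the
        # original in one rename step, as frob_debian_changelog does.
        tmp = fname + ".TMP"
        with open(fname) as src:
            with open(tmp, "w") as dst:
                for lineno, line in enumerate(src):
                    dst.write(new_first_line + "\n" if lineno == 0 else line)
        os.rename(tmp, fname)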
diff --git a/buildscripts/hacks_mandriva.py b/buildscripts/hacks_mandriva.py
new file mode 100644
index 0000000..d461709
--- /dev/null
+++ b/buildscripts/hacks_mandriva.py
@@ -0,0 +1,9 @@
+
+import os
+import glob
+
+def insert( env , options ):
+ jslibPaths = glob.glob('/usr/include/js-*/')
+ if len(jslibPaths) >= 1:
+ jslibPath = jslibPaths.pop()
+        env.Append( CPPPATH=[ jslibPath ] )
\ No newline at end of file
diff --git a/buildscripts/hacks_ubuntu.py b/buildscripts/hacks_ubuntu.py
index 67c5d78..56649f8 100644
--- a/buildscripts/hacks_ubuntu.py
+++ b/buildscripts/hacks_ubuntu.py
@@ -42,6 +42,6 @@ def foundxulrunner( env , options ):
incroot + "unstable/" ] )
env.Append( CPPDEFINES=[ "XULRUNNER" , "OLDJS" ] )
- if best.find( "1.9.0" ) >= 0:
+ if best.find( "1.9.0" ) >= 0 or best.endswith("1.9"):
env.Append( CPPDEFINES=[ "XULRUNNER190" ] )
return True
diff --git a/buildscripts/makedist.py b/buildscripts/makedist.py
new file mode 100644
index 0000000..35383b9
--- /dev/null
+++ b/buildscripts/makedist.py
@@ -0,0 +1,799 @@
+#!/usr/bin/env python
+
+# makedist.py: make a distro package (on an EC2 instance)
+
+# For ease of use, put a file called settings.py someplace in your
+# sys.path, containing something like the following:
+
+# makedist = {
+# # ec2-api-tools needs the following two set in the process
+# # environment.
+# "EC2_HOME": "/path/to/ec2-api-tools",
+# # The EC2 tools won't run at all unless this variable is set to a directory
+# # relative to which a "bin/java" exists.
+# "JAVA_HOME" : "/usr",
+# # All the ec2-api-tools take these two as arguments.
+# # Alternatively, you can set the environment variables EC2_PRIVATE_KEY and EC2_CERT
+# # respectively, leave these two out of settings.py, and let the ec2 tools default.
+# "ec2_pkey": "/path/to/pk-file.pem"
+# "ec2_cert" : "/path/to/cert-file.pem"
+# # This gets supplied to ec2-run-instances to rig up an ssh key for
+# # the remote user.
+# "ec2_sshkey" : "key-id",
+# # And so we need to tell our ssh processes where to find the
+# # appropriate public key file.
+# "ssh_keyfile" : "/path/to/key-id-file"
+# }
+
+# Notes: although there is a Python library for accessing EC2 as a web
+# service, it seemed as if it would be less work to just shell out to
+# the three EC2 management tools we use.
+
+# To make a distribution we must:
+
+# 1. Fire up an EC2 AMI suitable for building.
+# 2. Get any build-dependencies and configurations onto the remote host.
+# 3. Fetch the mongodb source.
+# 4. Run the package building tools.
+# 5. Save the package archives someplace permanent (eventually we
+# ought to install them into a public repository for the distro).
+# Unimplemented:
+# 6. Fire up an EC2 AMI suitable for testing whether the packages
+# install.
+# 7. Check whether the packages install and run.
+
+# The implementations of steps 1, 2, 4, 5, 6, and 7 will depend on the
+# distro of host we're talking to (Ubuntu, CentOS, Debian, etc.).
+
+from __future__ import with_statement
+import subprocess
+import sys
+import signal
+import getopt
+import socket
+import time
+import os.path
+import tempfile
+
+# For the moment, we don't handle any of the errors we raise, so it
+# suffices to have a simple subclass of Exception that just
+# stringifies according to a desired format.
+class SimpleError(Exception):
+ def __init__(self, *args):
+ self.args = args
+ def __str__(self):
+ return self.args[0] % self.args[1:]
+
+class SubcommandError(SimpleError):
+ def __init__(self, *args):
+ self.status = args[2]
+ super(SubcommandError, self).__init__(*args)
+
+class BaseConfigurator (object):
+ def __init__ (self, **kwargs):
+ self.configuration = []
+ self.arch=kwargs["arch"]
+ self.distro_name=kwargs["distro_name"]
+ self.distro_version=kwargs["distro_version"]
+
+ def lookup(self, what, dist, vers, arch):
+ for (wht, seq) in self.configuration:
+ if what == wht:
+ for ((dpat, vpat, apat), payload) in seq:
+ # For the moment, our pattern facility is just "*" or exact match.
+ if ((dist == dpat or dpat == "*") and
+ (vers == vpat or vpat == "*") and
+ (arch == apat or apat == "*")):
+ return payload
+ if getattr(self, what, False):
+ return getattr(self, what)
+ else:
+ raise SimpleError("couldn't find a%s %s configuration for dist=%s, version=%s, arch=%s",
+ "n" if ("aeiouAEIOU".find(what[0]) > -1) else "",
+ what, dist, vers, arch)
+
+ def default(self, what):
+ return self.lookup(what, self.distro_name, self.distro_version, self.arch)
+ def findOrDefault(self, dict, what):
+ return (dict[what] if what in dict else self.lookup(what, self.distro_name, self.distro_version, self.arch))
+
+class BaseHostConfigurator (BaseConfigurator):
+ def __init__(self, **kwargs):
+ super(BaseHostConfigurator, self).__init__(**kwargs)
+ self.configuration += [("distro_arch",
+ ((("debian", "*", "x86_64"), "amd64"),
+ (("ubuntu", "*", "x86_64"), "amd64"),
+ (("debian", "*", "x86"), "i386"),
+ (("ubuntu", "*", "x86"), "i386"),
+ (("centos", "*", "x86_64"), "x86_64"),
+ (("fedora", "*", "x86_64"), "x86_64"),
+ (("centos", "*", "x86"), "i386"),
+ (("fedora", "*", "x86"), "i386"),
+ (("*", "*", "x86_64"), "x86_64"),
+ (("*", "*", "x86"), "x86"))) ,
+ ]
+
+class LocalHost(object):
+ @classmethod
+ def runLocally(cls, argv):
+ print "running %s" % argv
+ r = subprocess.Popen(argv).wait()
+ if r != 0:
+ raise SubcommandError("subcommand %s exited %d", argv, r)
+
+class EC2InstanceConfigurator(BaseConfigurator):
+ def __init__(self, **kwargs):
+ super(EC2InstanceConfigurator, self).__init__(**kwargs)
+ self.configuration += [("ec2_ami",
+ ((("ubuntu", "10.4", "x86_64"), "ami-bf07ead6"),
+ (("ubuntu", "10.4", "x86"), "ami-f707ea9e"),
+ (("ubuntu", "9.10", "x86_64"), "ami-55739e3c"),
+ (("ubuntu", "9.10", "x86"), "ami-bb709dd2"),
+ (("ubuntu", "9.4", "x86_64"), "ami-eef61587"),
+ (("ubuntu", "9.4", "x86"), "ami-ccf615a5"),
+ (("ubuntu", "8.10", "x86"), "ami-c0f615a9"),
+ (("ubuntu", "8.10", "x86_64"), "ami-e2f6158b"),
+ (("ubuntu", "8.4", "x86"), "ami59b35f30"),
+ (("ubuntu", "8.4", "x86_64"), "ami-27b35f4e"),
+ (("debian", "5.0", "x86"), "ami-dcf615b5"),
+ (("debian", "5.0", "x86_64"), "ami-f0f61599"),
+ (("centos", "5.4", "x86"), "ami-f8b35e91"),
+ (("centos", "5.4", "x86_64"), "ami-ccb35ea5"),
+ (("fedora", "8", "x86_64"), "ami-2547a34c"),
+ (("fedora", "8", "x86"), "ami-5647a33f"))),
+ ("ec2_mtype",
+ ((("*", "*", "x86"), "m1.small"),
+ (("*", "*", "x86_64"), "m1.large"))),
+ ]
+
+
+class EC2Instance (object):
+ def __init__(self, configurator, **kwargs):
+ # Stuff we need to start an instance: AMI name, key and cert
+ # files. AMI and mtype default to configuration in this file,
+ # but can be overridden.
+ self.ec2_ami = configurator.findOrDefault(kwargs, "ec2_ami")
+ self.ec2_mtype = configurator.findOrDefault(kwargs, "ec2_mtype")
+
+ self.use_internal_name = True if "use_internal_name" in kwargs else False
+
+ # Authentication stuff defaults according to the conventions
+ # of the ec2-api-tools.
+ self.ec2_cert=kwargs["ec2_cert"]
+ self.ec2_pkey=kwargs["ec2_pkey"]
+ self.ec2_sshkey=kwargs["ec2_sshkey"]
+
+ # FIXME: this needs to be a commandline option
+ self.ec2_groups = ["default", "buildbot-slave", "dist-slave"]
+ self.terminate = False if "no_terminate" in kwargs else True
+
+ def parsedesc (self, hdl):
+ line1=hdl.readline()
+ splitline1=line1.split()
+ (_, reservation, unknown1, groupstr) = splitline1[:4]
+ groups = groupstr.split(',')
+ self.ec2_reservation = reservation
+ self.ec2_unknown1 = unknown1
+ self.ec2_groups = groups
+ # I haven't seen more than 4 data fields in one of these
+ # descriptions, but what do I know?
+ if len(splitline1)>4:
+ print >> sys.stderr, "more than 4 fields in description line 1\n%s\n" % line1
+ self.ec2_extras1 = splitline1[4:]
+ line2=hdl.readline()
+ splitline2=line2.split()
+ # The jerks make it tricky to parse line 2: the fields are
+ # dependent on the instance's state.
+ (_, instance, ami, status_or_hostname) = splitline2[:4]
+ self.ec2_instance = instance
+ if ami != self.ec2_ami:
+ print >> sys.stderr, "warning: AMI in description isn't AMI we invoked\nwe started %s, but got\n%s", (self.ec2_ami, line2)
+ # FIXME: are there other non-running statuses?
+ if status_or_hostname in ["pending", "terminated"]:
+ self.ec2_status = status_or_hostname
+ self.ec2_running = False
+ index = 4
+ self.ec2_storage = splitline2[index+8]
+ else:
+ self.ec2_running = True
+ index = 6
+ self.ec2_status = splitline2[5]
+ self.ec2_external_hostname = splitline2[3]
+ self.ec2_internal_hostname = splitline2[4]
+ self.ec2_external_ipaddr = splitline2[index+8]
+ self.ec2_internal_ipaddr = splitline2[index+9]
+ self.ec2_storage = splitline2[index+10]
+ (sshkey, unknown2, mtype, starttime, zone, unknown3, unknown4, monitoring) = splitline2[index:index+8]
+ # FIXME: potential disagreement with the supplied sshkey?
+ self.ec2_sshkey = sshkey
+ self.ec2_unknown2 = unknown2
+ # FIXME: potential disagreement with the supplied mtype?
+ self.ec2_mtype = mtype
+ self.ec2_starttime = starttime
+ self.ec2_zone = zone
+ self.ec2_unknown3 = unknown3
+ self.ec2_unknown4 = unknown4
+ self.ec2_monitoring = monitoring
+
+ def start(self):
+ "Fire up a fresh EC2 instance."
+ groups = reduce(lambda x, y : x+y, [["-g", i] for i in self.ec2_groups], [])
+ argv = ["ec2-run-instances",
+ self.ec2_ami, "-K", self.ec2_pkey, "-C", self.ec2_cert,
+ "-k", self.ec2_sshkey, "-t", self.ec2_mtype] + groups
+ self.ec2_running = False
+ print "running %s" % argv
+ proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
+ try:
+ self.parsedesc(proc.stdout)
+ if self.ec2_instance == "":
+ raise SimpleError("instance id is empty")
+ else:
+ print "Instance id: %s" % self.ec2_instance
+ finally:
+ r = proc.wait()
+ if r != 0:
+ raise SimpleError("ec2-run-instances exited %d", r)
+
+ def initwait(self):
+ # poll the instance description until we get a hostname.
+ # Note: it seems there can be a time interval after
+ # ec2-run-instance finishes during which EC2 will tell us that
+ # the instance ID doesn't exist. This is sort of bad.
+ state = "pending"
+ numtries = 0
+ giveup = 5
+
+ while not self.ec2_running:
+ time.sleep(15) # arbitrary
+ argv = ["ec2-describe-instances", "-K", self.ec2_pkey, "-C", self.ec2_cert, self.ec2_instance]
+ proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
+ try:
+ self.parsedesc(proc.stdout)
+ except Exception, e:
+ r = proc.wait()
+ numtries += 1
+ if numtries < giveup:
+ print >> sys.stderr, str(e)
+ continue
+ else:
+ raise SimpleError("ec2-describe-instances exited %d", r)
+
+ def stop(self):
+ if self.terminate:
+ LocalHost.runLocally(["ec2-terminate-instances", "-K", self.ec2_pkey, "-C", self.ec2_cert, self.ec2_instance])
+ else:
+ print "Not terminating EC2 instance %s." % self.ec2_instance
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+
+ def getHostname(self):
+ return self.ec2_internal_hostname if self.use_internal_name else self.ec2_external_hostname
+
+class SshConnectionConfigurator (BaseConfigurator):
+ def __init__(self, **kwargs):
+ super(SshConnectionConfigurator, self).__init__(**kwargs)
+ self.configuration += [("ssh_login",
+ # FLAW: this actually depends more on the AMI
+ # than the triple.
+ ((("debian", "*", "*"), "root"),
+ (("ubuntu", "10.4", "*"), "ubuntu"),
+ (("ubuntu", "9.10", "*"), "ubuntu"),
+ (("ubuntu", "9.4", "*"), "root"),
+ (("ubuntu", "8.10", "*"), "root"),
+ (("ubuntu", "8.4", "*"), "ubuntu"),
+ (("centos", "*", "*"), "root"))),
+ ]
+
+class SshConnection (object):
+ def __init__(self, configurator, **kwargs):
+ # Stuff we need to talk to the thing properly
+ self.ssh_login = configurator.findOrDefault(kwargs, "ssh_login")
+
+ self.ssh_host = kwargs["ssh_host"]
+ self.ssh_keyfile=kwargs["ssh_keyfile"]
+ # Gets set to False when we think we can ssh in.
+ self.sshwait = True
+
+ def sshWait(self):
+ "Poll until somebody's listening on port 22"
+
+ if not self.sshwait:
+ return
+ while self.sshwait:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ try:
+ s.connect((self.ssh_host, 22))
+ self.sshwait = False
+ print "connected on port 22 (ssh)"
+ time.sleep(15) # arbitrary timeout, in case the
+ # remote sshd is slow.
+ except socket.error, err:
+ pass
+ finally:
+ s.close()
+ time.sleep(3) # arbitrary timeout
+
+ def initSsh(self):
+ self.sshWait()
+ ctlpath="/tmp/ec2-ssh-%s-%s-%s" % (self.ssh_host, self.ssh_login, os.getpid())
+ argv = ["ssh", "-o", "StrictHostKeyChecking no",
+ "-M", "-o", "ControlPath %s" % ctlpath,
+ "-v", "-l", self.ssh_login, "-i", self.ssh_keyfile,
+ self.ssh_host]
+ print "Setting up ssh master connection with %s" % argv
+ self.sshproc = subprocess.Popen(argv)
+ self.ctlpath = ctlpath
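+ # Note: "-M" makes this long-lived ssh process an OpenSSH
+ # connection master; the later ssh/scp calls in runRemotely,
+ # sendFiles and recvFiles pass "-S"/"ControlPath" with the same
+ # ctlpath, so they multiplex over this one authenticated
+ # connection instead of redialing and re-authenticating.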
+
+
+ def __enter__(self):
+ self.initSsh()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ os.kill(self.sshproc.pid, signal.SIGTERM)
+ self.sshproc.wait()
+
+ def runRemotely(self, argv):
+ """Run a command on the host."""
+ LocalHost.runLocally(["ssh", "-o", "StrictHostKeyChecking no",
+ "-S", self.ctlpath,
+ "-l", self.ssh_login,
+ "-i", self.ssh_keyfile,
+ self.ssh_host] + argv)
+
+ def sendFiles(self, files):
+ self.sshWait()
+ for (localfile, remotefile) in files:
+ LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
+ "-o", "ControlMaster auto",
+ "-o", "ControlPath %s" % self.ctlpath,
+ "-i", self.ssh_keyfile,
+ "-rv", localfile,
+ self.ssh_login + "@" + self.ssh_host + ":" +
+ ("" if remotefile is None else remotefile) ])
+
+ def recvFiles(self, files):
+ self.sshWait()
+ print files
+ for (remotefile, localfile) in files:
+ LocalHost.runLocally(["scp", "-o", "StrictHostKeyChecking no",
+ "-o", "ControlMaster auto",
+ "-o", "ControlPath %s" % self.ctlpath,
+ "-i", self.ssh_keyfile,
+ "-rv",
+ self.ssh_login + "@" + self.ssh_host +
+ ":" + remotefile,
+ "." if localfile is None else localfile ])
+
+
+class ScriptFileConfigurator (BaseConfigurator):
+ deb_productdir = "dists"
+ rpm_productdir = "/usr/src/redhat/RPMS" # FIXME: this could be
+ # ~/redhat/RPMS or
+ # something elsewhere
+
+ preamble_commands = """
+set -x # verbose execution, for debugging
+set -e # errexit, stop on errors
+"""
+ # Strictly speaking, we don't need to mangle debian files on rpm
+ # systems (and vice versa), but (a) it doesn't hurt anything to do
+ # so, and (b) mangling files the same way everywhere could
+ # conceivably help uncover bugs in the hideous hideous sed
+ programs we're running here. (N.B., for POSIX wonks: POSIX sed
+ doesn't support in-place file editing ("sed -i"), which we use
+ below. So if we end up wanting to run these mangling commands,
+ # e.g., on a BSD, we'll need to make them fancier.)
+ mangle_files_commands ="""
+# On debianoids, the package names in the changelog and control file
+# must agree, and only files in a subdirectory of debian/ matching the
+# package name will get included in the .deb, so we also have to mangle
+# the rules file.
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '1s/.*([^)]*)/{pkg_name}{pkg_name_suffix} ({pkg_version})/' debian/changelog ) || exit 1
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^Source:.*/Source: {pkg_name}{pkg_name_suffix}/;
+s/^Package:.*mongodb/Package: {pkg_name}{pkg_name_suffix}\\
+Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|$(CURDIR)/debian/mongodb/|$(CURDIR)/debian/{pkg_name}{pkg_name_suffix}/|g' debian/rules) || exit 1
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|debian/mongodb.manpages|debian/{pkg_name}{pkg_name_suffix}.manpages|g' debian/rules) || exit 1
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}/; /^Version:/s/.*/Version: {pkg_version}/;' rpm/mongo.spec )
+# Debian systems require some ridiculous workarounds to get an init
+ # script at /etc/init.d/mongodb when the package name isn't the init
+# script name. Note: dh_installinit --name won't work, because that
+# option would require the init script under debian/ to be named
+# mongodb.
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" &&
+ln debian/init.d debian/{pkg_name}{pkg_name_suffix}.mongodb.init &&
+ln debian/mongodb.upstart debian/{pkg_name}{pkg_name_suffix}.mongodb.upstart &&
+sed -i 's/dh_installinit/dh_installinit --name=mongodb/' debian/rules) || exit 1
+"""
+
+ mangle_files_for_ancient_redhat_commands = """
+# Ancient RedHats ship with very old boosts and non-UTF8-aware js
+# libraries, so we need to link statically to those.
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|^scons.*((inst)all)|scons --prefix=$RPM_BUILD_ROOT/usr --extralib=nspr4 --staticlib=boost_system-mt,boost_thread-mt,boost_filesystem-mt,boost_program_options-mt,js $1|' rpm/mongo.spec )
+"""
+
+ deb_prereq_commands = """
+# Configure debconf to never prompt us for input.
+export DEBIAN_FRONTEND=noninteractive
+apt-get update
+apt-get install -y {pkg_prereq_str}
+"""
+
+ deb_build_commands="""
+mkdir -p "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
+mkdir -p "{pkg_product_dir}/{distro_version}/10gen/source"
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}"; debuild ) || exit 1
+# Try installing it
+dpkg -i *.deb
+ps ax | grep "[m]ongo" || {{ echo "no running mongo" >/dev/stderr; exit 1; }}
+cp {pkg_name}{pkg_name_suffix}*.deb "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}"
+cp {pkg_name}{pkg_name_suffix}*.dsc "{pkg_product_dir}/{distro_version}/10gen/source"
+cp {pkg_name}{pkg_name_suffix}*.tar.gz "{pkg_product_dir}/{distro_version}/10gen/source"
+dpkg-scanpackages "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}" /dev/null | gzip -9c > "{pkg_product_dir}/{distro_version}/10gen/binary-{distro_arch}/Packages.gz"
+dpkg-scansources "{pkg_product_dir}/{distro_version}/10gen/source" /dev/null | gzip -9c > "{pkg_product_dir}/{distro_version}/10gen/source/Sources.gz"
+"""
+ rpm_prereq_commands = """
+rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/{distro_arch}/epel-release-5-3.noarch.rpm
+yum -y install {pkg_prereq_str}
+"""
+ rpm_build_commands="""
+for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do mkdir -p /usr/src/redhat/$d; done
+cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" /usr/src/redhat/SPECS
+tar -cpzf /usr/src/redhat/SOURCES/"{pkg_name}{pkg_name_suffix}-{pkg_version}".tar.gz "{pkg_name}{pkg_name_suffix}-{pkg_version}"
+rpmbuild -ba /usr/src/redhat/SPECS/mongo.spec
+"""
+ # FIXME: this is clean, but adds 40 minutes or so to the build process.
+ old_rpm_precommands = """
+yum install -y bzip2-devel python-devel libicu-devel chrpath zlib-devel nspr-devel readline-devel ncurses-devel
+# FIXME: this is just some random URL found on rpmfind some day in 01/2010.
+wget ftp://194.199.20.114/linux/EPEL/5Client/SRPMS/js-1.70-8.el5.src.rpm
+rpm -ivh js-1.70-8.el5.src.rpm
+sed -i 's/XCFLAGS.*$/XCFLAGS=\"%{{optflags}} -fPIC -DJS_C_STRINGS_ARE_UTF8\" \\\\/' /usr/src/redhat/SPECS/js.spec
+rpmbuild -ba /usr/src/redhat/SPECS/js.spec
+rpm -Uvh /usr/src/redhat/RPMS/{distro_arch}/js-1.70-8.{distro_arch}.rpm
+rpm -Uvh /usr/src/redhat/RPMS/{distro_arch}/js-devel-1.70-8.{distro_arch}.rpm
+# FIXME: this is just some random URL found on rpmfind some day in 01/2010.
+wget ftp://195.220.108.108/linux/sourceforge/g/project/gr/gridiron2/support-files/FC10%20source%20RPMs/boost-1.38.0-1.fc10.src.rpm
+rpm -ivh boost-1.38.0-1.fc10.src.rpm
+rpmbuild -ba /usr/src/redhat/SPECS/boost.spec
+rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-1.38.0-1.{distro_arch}.rpm
+rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-devel-1.38.0-1.{distro_arch}.rpm
+"""
+
+ # This horribleness is an attempt to work around ways that you're
+ # not really meant to package things for Debian unless you are
+ # Debian.
+
+ # On very old Debianoids, libboost-<foo>-dev will be some old
+ # boost that's not as thready as we want, but which Eliot says
+ # will work.
+ very_old_deb_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev", "xulrunner1.9-dev"]
+
+ # On less old (but still old!) Debianoids, libboost-<foo>-dev is
+ # still a 1.34, but 1.35 packages are available, so we want those.
+ old_deb_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev", "xulrunner-dev"]
+
+ # On newer Debianoids, libbost-<foo>-dev is some sufficiently new
+ # thing.
+ new_deb_prereqs = [ "libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev", "xulrunner-dev" ]
+
+ common_deb_prereqs = [ "build-essential", "dpkg-dev", "libreadline-dev", "libpcap-dev", "libpcre3-dev", "git-core", "scons", "debhelper", "devscripts", "git-core" ]
+
+ centos_preqres = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
+ fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
+
+ def __init__(self, **kwargs):
+ super(ScriptFileConfigurator, self).__init__(**kwargs)
+ if kwargs["mongo_version"][0] == 'r':
+ self.get_mongo_commands = """
+wget -Otarball.tgz "http://github.com/mongodb/mongo/tarball/{mongo_version}";
+tar xzf tarball.tgz
+mv "`tar tzf tarball.tgz | sed 's|/.*||' | sort -u | head -n1`" "{pkg_name}{pkg_name_suffix}-{pkg_version}"
+"""
+ else:
+ self.get_mongo_commands = """
+git clone git://github.com/mongodb/mongo.git
+"""
+ if kwargs['mongo_version'][0] == 'v':
+ self.get_mongo_commands +="""
+( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "`git log origin/{mongo_version} | sed -n '1s/^commit //p;q'`" ) | tar xf -
+"""
+ else:
+ self.get_mongo_commands += """
+( cd mongo && git archive --prefix="{pkg_name}{pkg_name_suffix}-{pkg_version}/" "{mongo_version}" ) | tar xf -
+"""
+
+ if "local_mongo_dir" in kwargs:
+ self.mangle_files_commands = """( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && rm -rf debian rpm && cp -pvR ~/pkg/* . )
+""" + self.mangle_files_commands
+
+ self.configuration += [("pkg_product_dir",
+ ((("ubuntu", "*", "*"), self.deb_productdir),
+ (("debian", "*", "*"), self.deb_productdir),
+ (("fedora", "*", "*"), self.rpm_productdir),
+ (("centos", "*", "*"), self.rpm_productdir))),
+ ("pkg_prereqs",
+ ((("ubuntu", "9.4", "*"),
+ self.old_deb_prereqs + self.common_deb_prereqs),
+ (("ubuntu", "9.10", "*"),
+ self.new_deb_prereqs + self.common_deb_prereqs),
+ (("ubuntu", "10.4", "*"),
+ self.new_deb_prereqs + self.common_deb_prereqs),
+ (("ubuntu", "8.10", "*"),
+ self.old_deb_prereqs + self.common_deb_prereqs),
+ (("ubuntu", "8.4", "*"),
+ self.very_old_deb_prereqs + self.common_deb_prereqs),
+ (("debian", "5.0", "*"),
+ self.old_deb_prereqs + self.common_deb_prereqs),
+ (("fedora", "8", "*"),
+ self.fedora_prereqs),
+ (("centos", "5.4", "*"),
+ self.centos_prereqs))),
+ ("commands",
+ ((("debian", "*", "*"),
+ self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
+ (("ubuntu", "*", "*"),
+ self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
+ (("centos", "*", "*"),
+ self.preamble_commands + self.old_rpm_precommands + self.rpm_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands + self.rpm_build_commands),
+ (("fedora", "*", "*"),
+ self.preamble_commands + self.old_rpm_precommands + self.rpm_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.rpm_build_commands))),
+ ("pkg_name",
+ ((("debian", "*", "*"), "mongodb"),
+ (("ubuntu", "*", "*"), "mongodb"),
+ (("centos", "*", "*"), "mongo"),
+
+ (("fedora", "*", "*"), "mongo")
+ )),
+ ("pkg_name_conflicts",
+ ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot"]),
+ ))
+ ]
+
+
+
+
+class ScriptFile(object):
+ def __init__(self, configurator, **kwargs):
+ self.mongo_version = kwargs["mongo_version"]
+ self.pkg_version = kwargs["pkg_version"]
+ self.pkg_name_suffix = kwargs["pkg_name_suffix"] if "pkg_name_suffix" in kwargs else ""
+ self.pkg_prereqs = configurator.default("pkg_prereqs")
+ self.pkg_name = configurator.default("pkg_name")
+ self.pkg_product_dir = configurator.default("pkg_product_dir")
+ # Copy the conflict list so we don't mutate the shared
+ # configuration default, then drop our own suffix from it.
+ self.pkg_name_conflicts = list(configurator.default("pkg_name_conflicts")) if self.pkg_name_suffix else []
+ if self.pkg_name_suffix and self.pkg_name_suffix in self.pkg_name_conflicts:
+ self.pkg_name_conflicts.remove(self.pkg_name_suffix)
+ self.formatter = configurator.default("commands")
+ self.distro_name = configurator.default("distro_name")
+ self.distro_version = configurator.default("distro_version")
+ self.distro_arch = configurator.default("distro_arch")
+
+ def genscript(self):
+ return self.formatter.format(mongo_version=self.mongo_version,
+ distro_name=self.distro_name,
+ distro_version=self.distro_version,
+ distro_arch=self.distro_arch,
+ pkg_prereq_str=" ".join(self.pkg_prereqs),
+ pkg_name=self.pkg_name,
+ pkg_name_suffix=self.pkg_name_suffix,
+ pkg_version=self.pkg_version,
+ pkg_product_dir=self.pkg_product_dir,
+ # KLUDGE: rpm specs and deb
+ # control files use
+ # comma-separated conflicts,
+ # but there's no reason to
+ # suppose this works elsewhere
+ pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in self.pkg_name_conflicts])
+ )
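+ # e.g. with pkg_name "mongodb" and suffix "-snap", the conflicts
+ # string comes out as "mongodb, mongodb-stable, mongodb-unstable,
+ # mongodb-snapshot".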
+
+ def __enter__(self):
+ self.localscript=None
+ # One of tempfile or I is very stupid: mkstemp hands back an
+ # open fd we don't want, so close it and reopen by name below.
+ (fh, name) = tempfile.mkstemp('', "makedist.", ".")
+ os.close(fh)
+ with open(name, 'w+') as fh:
+ fh.write(self.genscript())
+ self.localscript=name
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self.localscript:
+ os.unlink(self.localscript)
+
+class Configurator(SshConnectionConfigurator, EC2InstanceConfigurator, ScriptFileConfigurator, BaseHostConfigurator):
+ def __init__(self, **kwargs):
+ super(Configurator, self).__init__(**kwargs)
+
+def main():
+# checkEnvironment()
+
+ (kwargs, args) = processArguments()
+ (rootdir, distro_name, distro_version, arch, mongo_version_spec) = args[:5]
+ # FIXME: there are a few other characters that we can't use in
+ # file names on Windows, in case this program really needs to run
+ # there.
+ distro_name = distro_name.replace('/', '-').replace('\\', '-')
+ distro_version = distro_version.replace('/', '-').replace('\\', '-')
+ arch = arch.replace('/', '-').replace('\\', '-')
+ try:
+ import settings
+ if "makedist" in dir ( settings ):
+ for key in ["EC2_HOME", "JAVA_HOME"]:
+ if key in settings.makedist:
+ os.environ[key] = settings.makedist[key]
+ for key in ["ec2_pkey", "ec2_cert", "ec2_sshkey", "ssh_keyfile" ]:
+ if key not in kwargs and key in settings.makedist:
+ kwargs[key] = settings.makedist[key]
+ except Exception, err:
+ print "No settings: %s. Continuing anyway..." % err
+ pass
+
+ # Ensure that PATH contains $EC2_HOME/bin
+ vars = ["EC2_HOME", "JAVA_HOME"]
+ for var in vars:
+ if os.getenv(var) is None:
+ raise SimpleError("Environment variable %s is unset; did you create a settings.py?", var)
+
+ if len([True for x in os.environ["PATH"].split(":") if x.find(os.environ["EC2_HOME"]) > -1]) == 0:
+ os.environ["PATH"]=os.environ["EC2_HOME"]+"/bin:"+os.environ["PATH"]
+
+
+ kwargs["distro_name"] = distro_name
+ kwargs["distro_version"] = distro_version
+ kwargs["arch"] = arch
+
+ foo = mongo_version_spec.split(":")
+ kwargs["mongo_version"] = foo[0] # this can be a commit id, a
+ # release id "r1.2.2", or a
+ # branch name starting with v.
+ if len(foo) > 1:
+ kwargs["pkg_name_suffix"] = foo[1]
+ if len(foo) > 2 and foo[2]:
+ kwargs["pkg_version"] = foo[2]
+ else:
+ kwargs["pkg_version"] = time.strftime("%Y%m%d")
+
+ if "subdirs" in kwargs:
+ kwargs["localdir"] = "%s/%s/%s/%s/%s" % (rootdir, distro_name, distro_version, arch, kwargs["mongo_version"])
+ else:
+ kwargs["localdir"] = rootdir
+
+ if "pkg_name_suffix" not in kwargs:
+ if kwargs["mongo_version"][0] in ["r", "v"]:
+ nums = kwargs["mongo_version"].split(".")
+ if int(nums[1]) % 2 == 0:
+ kwargs["pkg_name_suffix"] = "-stable"
+ else:
+ kwargs["pkg_name_suffix"] = "-unstable"
+ else:
+ kwargs["pkg_name_suffix"] = ""
+
+
+ kwargs['local_gpg_dir'] = kwargs["local_gpg_dir"] if "local_gpg_dir" in kwargs else os.path.expanduser("~/.gnupg")
+ configurator = Configurator(**kwargs)
+ LocalHost.runLocally(["mkdir", "-p", kwargs["localdir"]])
+ with ScriptFile(configurator, **kwargs) as script:
+ with open(script.localscript) as f:
+ print """# Going to run the following on a fresh AMI:"""
+ print f.read()
+ time.sleep(10)
+ with EC2Instance(configurator, **kwargs) as ec2:
+ ec2.initwait()
+ kwargs["ssh_host"] = ec2.getHostname()
+ with SshConnection(configurator, **kwargs) as ssh:
+ ssh.runRemotely(["uname -a; ls /"])
+ ssh.runRemotely(["mkdir", "pkg"])
+ if "local_mongo_dir" in kwargs:
+ ssh.sendFiles([(kwargs["local_mongo_dir"]+'/'+d, "pkg") for d in ["rpm", "debian"]])
+ ssh.sendFiles([(kwargs['local_gpg_dir'], ".gnupg")])
+ ssh.sendFiles([(script.localscript, "makedist.sh")])
+ ssh.runRemotely((["sudo"] if ssh.ssh_login != "root" else [])+ ["sh", "makedist.sh"])
+ ssh.recvFiles([(script.pkg_product_dir, kwargs['localdir'])])
+
+def processArguments():
+ # flagspec [ (short, long, argument?, description, argname)* ]
+ flagspec = [ ("?", "usage", False, "Print a (useless) usage message", None),
+ ("h", "help", False, "Print a help message and exit", None),
+ ("N", "no-terminate", False, "Leave the EC2 instance running at the end of the job", None),
+ ("S", "subdirs", False, "Create subdirectories of the output directory based on distro name, version, and architecture", None),
+ ("I", "use-internal-name", False, "Use the EC2 internal hostname for sshing", None),
+ (None, "local-gpg-dir", True, "Local directory of gpg junk", "STRING"),
+ (None, "local-mongo-dir", True, "Copy packaging files from local mongo checkout", "DIRECTORY"),
+ ]
+ shortopts = "".join([t[0] + (":" if t[2] else "") for t in flagspec if t[0] is not None])
+ longopts = [t[1] + ("=" if t[2] else "") for t in flagspec]
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
+ except getopt.GetoptError, err:
+ print str(err)
+ sys.exit(2)
+
+ # Normalize the getopt-parsed options.
+ kwargs = {}
+ for (opt, arg) in opts:
+ flag = opt
+ opt = opt.lstrip("-")
+ if flag[:2] == '--': #long opt
+ kwargs[opt.replace('-', '_')] = arg
+ elif flag[:1] == "-": #short opt
+ ok = False
+ for tuple in flagspec:
+ if tuple[0] == opt:
+ ok = True
+ kwargs[tuple[1].replace('-', '_')] = arg
+ break
+ if not ok:
+ raise SimpleError("this shouldn't happen: unrecognized option flag: %s", opt)
+ else:
+ raise SimpleError("this shouldn't happen: non-option returned from getopt()")
+
+ if "help" in kwargs:
+ print "Usage: %s [OPTIONS] DIRECTORY DISTRO DISTRO-VERSION ARCHITECTURE MONGO-VERSION-SPEC" % sys.argv[0]
+ print """Build some packages on new EC2 AMI instances, leave packages under DIRECTORY.
+
+MONGO-VERSION-SPEC has the syntax
+Commit(:Pkg-Name-Suffix(:Pkg-Version)). If Commit starts with an 'r',
+build from a tagged release; if Commit starts with a 'v', build from
+the HEAD of a version branch; otherwise, build whatever git commit is
+identified by Commit. Pkg-Name-Suffix gets appended to the package
+name, and defaults to "-stable" and "-unstable" if Commit looks like
+it designates a stable or unstable release/branch, respectively.
+Pkg-Version is used as the package version, and defaults to YYYYMMDD.
+Examples:
+
+ HEAD # build a snapshot of HEAD, name the package
+ # "mongodb", use YYYYMMDD for the version
+
+ HEAD:-snap # build a snapshot of HEAD, name the package
+ # "mongodb-snap", use YYYYMMDD for the version
+
+ HEAD:-snap:123 # build a snapshot of HEAD, name the package
+ # "mongodb-snap", use 123 for the version
+
+ HEAD:-suffix:1.3 # build a snapshot of HEAD, name the package
+ # "mongodb-snapshot", use "1.3 for the version
+
+ r1.2.3 # build a package of the 1.2.3 release, call it "mongodb-stable",
+ # make the package version YYYYMMDD.
+
+ v1.2:-stable: # build a package of the HEAD of the 1.2 branch
+
+ decafbad:-foo:123 # build git commit "decafbad", call the package
+ # "mongodb-foo" with package version 123.
+
+Options:"""
+ for t in flagspec:
+ print "%-20s\t%s." % ("%4s--%s%s:" % ("-%s, " % t[0] if t[0] else "", t[1], ("="+t[4]) if t[4] else ""), t[3])
+ print """
+Mandatory arguments to long options are also mandatory for short
+options. Some EC2 arguments default to (and override) environment
+variables; see the ec2-api-tools documentation."""
+ sys.exit(0)
+
+ if "usage" in kwargs:
+ print "Usage: %s [OPTIONS] OUTPUT-DIR DISTRO-NAME DISTRO-VERSION ARCHITECTURE MONGO-VERSION-SPEC" % sys.argv[0]
+ sys.exit(0)
+
+
+ return (kwargs, args)
+
+
+if __name__ == "__main__":
+ main()
+
+# Examples:
+
+# ./makedist.py --local-gpg-dir=$HOME/10gen/dst/dist-gnupg /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot
diff --git a/buildscripts/s3md5.py b/buildscripts/s3md5.py
new file mode 100644
index 0000000..89800cd
--- /dev/null
+++ b/buildscripts/s3md5.py
@@ -0,0 +1,48 @@
+
+import os
+import sys
+
+sys.path.append( "." )
+sys.path.append( ".." )
+sys.path.append( "../../" )
+sys.path.append( "../../../" )
+
+import simples3
+import settings
+import subprocess
+
+# check s3 for md5 hashes
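+# Assumption this check relies on: for objects uploaded in a single
+# PUT, S3 returns the object's hex MD5 as its ETag, so we can publish
+# the etag as a .md5 file without re-downloading anything. (Multipart
+# uploads would break this, since their ETags aren't plain MD5s.)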
+
+def check_dir( bucket , prefix ):
+
+ zips = {}
+ md5s = {}
+ for ( key , modify , etag , size ) in bucket.listdir( prefix=prefix ):
+ if key.endswith( ".tgz" ) or key.endswith( ".zip" ):
+ zips[key] = etag.replace( '"' , '' )
+ elif key.endswith( ".md5" ):
+ md5s[key] = True
+ elif key.find( "$folder$" ) > 0:
+ pass
+ else:
+ print( "unknown file type: " + key )
+
+ for x in zips:
+ m = x + ".md5"
+ if m in md5s:
+ continue
+
+ print( "need to do: " + x + " " + zips[x] + " to " + m )
+ bucket.put( m , zips[x] , acl="public-read" )
+
+
+def run():
+
+ bucket = simples3.S3Bucket( settings.bucket , settings.id , settings.key )
+
+ for x in [ "osx" , "linux" , "win32" , "sunos5" ]:
+ check_dir( bucket , x )
+
+
+if __name__ == "__main__":
+ run()
diff --git a/buildscripts/utils.py b/buildscripts/utils.py
new file mode 100644
index 0000000..41d6767
--- /dev/null
+++ b/buildscripts/utils.py
@@ -0,0 +1,47 @@
+
+import re
+import socket
+import time
+
+# various utilities that are handy
+
+def execsys( args ):
+ import subprocess
+ if isinstance( args , str ):
+ r = re.compile( "\s+" )
+ args = r.split( args )
+ p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+ r = p.communicate()
+ return r
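+
+# usage sketch: (out, err) = execsys( "ls /tmp" ) -- a string is split
+# on whitespace, a list is passed through; communicate() hands back a
+# (stdout, stderr) tuple once the child exits.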
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps -ax" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+
+ return False
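+# usage sketch: didMongodStart() polls localhost:27017 once a second
+# for ~20 seconds; True just means something accepted a TCP connection
+# on that port, not that it's necessarily a healthy mongod.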
+
+
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index f9fc570..e0f59a9 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -22,8 +22,6 @@
namespace mongo {
- CmdLine cmdLine;
-
const char * curNs = "in client mode";
bool dbexitCalled = false;
@@ -41,6 +39,10 @@ namespace mongo {
return dbexitCalled;
}
+ void setupSignals(){
+ // maybe should do SIGPIPE here, not sure
+ }
+
string getDbContext() {
return "in client only mode";
}
@@ -48,6 +50,11 @@ namespace mongo {
bool haveLocalShardingInfo( const string& ns ){
return false;
}
+
+ DBClientBase * createDirectClient(){
+ uassert( 10256 , "no createDirectClient in clientOnly" , 0 );
+ return 0;
+ }
/*
auto_ptr<CursorIterator> Helpers::find( const char *ns , BSONObj query , bool requireIndex ){
uassert( 10000 , "Helpers::find can't be used in client" , 0 );
diff --git a/client/connpool.cpp b/client/connpool.cpp
index b332bae..d69c787 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -21,23 +21,26 @@
#include "stdafx.h"
#include "connpool.h"
#include "../db/commands.h"
+#include "syncclusterconnection.h"
namespace mongo {
DBConnectionPool pool;
DBClientBase* DBConnectionPool::get(const string& host) {
- boostlock L(poolMutex);
+ scoped_lock L(poolMutex);
PoolForHost *&p = pools[host];
if ( p == 0 )
p = new PoolForHost();
if ( p->pool.empty() ) {
- string errmsg;
+ int numCommas = DBClientBase::countCommas( host );
DBClientBase *c;
- if( host.find(',') == string::npos ) {
+
+ if( numCommas == 0 ) {
DBClientConnection *cc = new DBClientConnection(true);
log(2) << "creating new connection for pool to:" << host << endl;
+ string errmsg;
if ( !cc->connect(host.c_str(), errmsg) ) {
delete cc;
uassert( 11002 , (string)"dbconnectionpool: connect failed " + host , false);
@@ -46,7 +49,7 @@ namespace mongo {
c = cc;
onCreate( c );
}
- else {
+ else if ( numCommas == 1 ) {
DBClientPaired *p = new DBClientPaired();
if( !p->connect(host) ) {
delete p;
@@ -55,6 +58,12 @@ namespace mongo {
}
c = p;
}
+ else if ( numCommas == 2 ) {
+ c = new SyncClusterConnection( host );
+ }
+ else {
+ uassert( 13071 , (string)"invalid hostname [" + host + "]" , 0 );
+ }
return c;
}
DBClientBase *c = p->pool.top();
@@ -64,7 +73,7 @@ namespace mongo {
}
void DBConnectionPool::flush(){
- boostlock L(poolMutex);
+ scoped_lock L(poolMutex);
for ( map<string,PoolForHost*>::iterator i = pools.begin(); i != pools.end(); i++ ){
PoolForHost* p = i->second;
@@ -105,9 +114,19 @@ namespace mongo {
}
}
+ ScopedDbConnection::~ScopedDbConnection() {
+ if ( _conn && ! _conn->isFailed() ) {
+ /* see done() comments above for why we log this line */
+ log() << "~ScopedDBConnection: _conn != null" << endl;
+ kill();
+ }
+ }
+
+
class PoolFlushCmd : public Command {
public:
PoolFlushCmd() : Command( "connpoolsync" ){}
+ virtual LockType locktype(){ return NONE; }
virtual bool run(const char*, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool){
pool.flush();
result << "ok" << 1;
diff --git a/client/connpool.h b/client/connpool.h
index 34ed498..5a47b01 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -51,7 +51,7 @@ namespace mongo {
}
*/
class DBConnectionPool {
- boost::mutex poolMutex;
+ mongo::mutex poolMutex;
map<string,PoolForHost*> pools; // servername -> pool
list<DBConnectionHook*> _hooks;
@@ -63,7 +63,7 @@ namespace mongo {
void release(const string& host, DBClientBase *c) {
if ( c->isFailed() )
return;
- boostlock L(poolMutex);
+ scoped_lock L(poolMutex);
pools[host]->pool.push(c);
}
void addHook( DBConnectionHook * hook );
@@ -122,14 +122,9 @@ namespace mongo {
pool.release(host, _conn);
_conn = 0;
}
+
+ ~ScopedDbConnection();
- ~ScopedDbConnection() {
- if ( _conn && ! _conn->isFailed() ) {
- /* see done() comments above for why we log this line */
- log() << "~ScopedDBConnection: _conn != null" << endl;
- kill();
- }
- }
};
} // namespace mongo
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 165981d..d505c9f 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -105,7 +105,7 @@ namespace mongo {
/* --- dbclientcommands --- */
- inline bool DBClientWithCommands::isOk(const BSONObj& o) {
+ bool DBClientWithCommands::isOk(const BSONObj& o) {
return o.getIntField("ok") == 1;
}
@@ -233,11 +233,11 @@ namespace mongo {
BSONObj o;
if ( info == 0 ) info = &o;
BSONObjBuilder b;
- b.append("create", ns);
+ string db = nsToDatabase(ns.c_str());
+ b.append("create", ns.c_str() + db.length() + 1);
if ( size ) b.append("size", size);
if ( capped ) b.append("capped", true);
if ( max ) b.append("max", max);
- string db = nsToDatabase(ns.c_str());
return runCommand(db.c_str(), b.done(), *info);
}
@@ -448,7 +448,13 @@ namespace mongo {
port = CmdLine::DefaultDBPort;
ip = hostbyname( serverAddress.c_str() );
}
- massert( 10277 , "Unable to parse hostname", !ip.empty() );
+ if( ip.empty() ) {
+ stringstream ss;
+ ss << "client connect: couldn't parse/resolve hostname: " << _serverAddress;
+ errmsg = ss.str();
+ failed = true;
+ return false;
+ }
// we keep around SockAddr for connection life -- maybe MessagingPort
// requires that?
@@ -494,10 +500,10 @@ namespace mongo {
}
auto_ptr<DBClientCursor> DBClientBase::query(const string &ns, Query query, int nToReturn,
- int nToSkip, const BSONObj *fieldsToReturn, int queryOptions) {
+ int nToSkip, const BSONObj *fieldsToReturn, int queryOptions , int batchSize ) {
auto_ptr<DBClientCursor> c( new DBClientCursor( this,
- ns, query.obj, nToReturn, nToSkip,
- fieldsToReturn, queryOptions ) );
+ ns, query.obj, nToReturn, nToSkip,
+ fieldsToReturn, queryOptions , batchSize ) );
if ( c->init() )
return c;
return auto_ptr< DBClientCursor >( 0 );
@@ -562,7 +568,7 @@ namespace mongo {
void DBClientBase::update( const string & ns , Query query , BSONObj obj , bool upsert , bool multi ) {
BufBuilder b;
- b.append( (int)0 ); // reserverd
+ b.append( (int)0 ); // reserved
b.append( ns );
int flags = 0;
@@ -740,10 +746,19 @@ namespace mongo {
}
}
+ int DBClientCursor::nextBatchSize(){
+ if ( nToReturn == 0 )
+ return batchSize;
+ if ( batchSize == 0 )
+ return nToReturn;
+
+ return batchSize < nToReturn ? batchSize : nToReturn;
+ }
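+ // i.e. with both a limit and a batch size set, each request asks
+ // the server for min(batchSize, nToReturn) documents; a zero in
+ // either slot means that side imposes no constraint.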
+
bool DBClientCursor::init() {
Message toSend;
if ( !cursorId ) {
- assembleRequest( ns, query, nToReturn, nToSkip, fieldsToReturn, opts, toSend );
+ assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
} else {
BufBuilder b;
b.append( opts );
@@ -761,10 +776,14 @@ namespace mongo {
void DBClientCursor::requestMore() {
assert( cursorId && pos == nReturned );
+ if (haveLimit){
+ nToReturn -= nReturned;
+ assert(nToReturn > 0);
+ }
BufBuilder b;
b.append(opts);
b.append(ns.c_str());
- b.append(nToReturn);
+ b.append(nextBatchSize());
b.append(cursorId);
Message toSend;
@@ -802,6 +821,12 @@ namespace mongo {
/** If true, safe to call next(). Requests more from server if necessary. */
bool DBClientCursor::more() {
+ if ( !_putBack.empty() )
+ return true;
+
+ if (haveLimit && pos >= nToReturn)
+ return false;
+
if ( pos < nReturned )
return true;
@@ -814,6 +839,11 @@ namespace mongo {
BSONObj DBClientCursor::next() {
assert( more() );
+ if ( !_putBack.empty() ) {
+ BSONObj ret = _putBack.top();
+ _putBack.pop();
+ return ret;
+ }
pos++;
BSONObj o(data);
data += o.objsize();
@@ -821,18 +851,19 @@ namespace mongo {
}
DBClientCursor::~DBClientCursor() {
- if ( cursorId && _ownCursor ) {
- BufBuilder b;
- b.append( (int)0 ); // reserved
- b.append( (int)1 ); // number
- b.append( cursorId );
+ DESTRUCTOR_GUARD (
+ if ( cursorId && _ownCursor ) {
+ BufBuilder b;
+ b.append( (int)0 ); // reserved
+ b.append( (int)1 ); // number
+ b.append( cursorId );
- Message m;
- m.setData( dbKillCursors , b.buf() , b.len() );
-
- connector->sayPiggyBack( m );
- }
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+ connector->sayPiggyBack( m );
+ }
+ );
}
/* --- class dbclientpaired --- */
@@ -945,9 +976,9 @@ namespace mongo {
}
auto_ptr<DBClientCursor> DBClientPaired::query(const string &a, Query b, int c, int d,
- const BSONObj *e, int f)
+ const BSONObj *e, int f, int g)
{
- return checkMaster().query(a,b,c,d,e,f);
+ return checkMaster().query(a,b,c,d,e,f,g);
}
BSONObj DBClientPaired::findOne(const string &a, Query b, const BSONObj *c, int d) {
diff --git a/client/dbclient.h b/client/dbclient.h
index e3f1675..ebd3b73 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -21,6 +21,7 @@
#include "../util/message.h"
#include "../db/jsobj.h"
#include "../db/json.h"
+#include <stack>
namespace mongo {
@@ -205,6 +206,13 @@ namespace mongo {
/** If true, safe to call next(). Requests more from server if necessary. */
bool more();
+ /** If true, there is more in our local buffers to be fetched via next(). Returns
+ false when a getMore request back to server would be required. You can use this
+ if you want to exhaust whatever data has been fetched to the client already but
+ then perhaps stop.
+ */
+ bool moreInCurrentBatch() { return !_putBack.empty() || pos < nReturned; }
+
/** next
@return next object in the result cursor.
on an error at the remote server, you will get back:
@@ -212,6 +220,11 @@ namespace mongo {
if you do not want to handle that yourself, call nextSafe().
*/
BSONObj next();
+
+ /**
+ restore an object previously returned by next() to the cursor
+ */
+ void putBack( const BSONObj &o ) { _putBack.push( o.getOwned() ); }
/** throws AssertionException if get back { $err : ... } */
BSONObj nextSafe() {
@@ -246,19 +259,25 @@ namespace mongo {
return (opts & QueryOption_CursorTailable) != 0;
}
+ /** see QueryResult::ResultFlagType (db/dbmessage.h) for flag values
+ mostly these flags are for internal purposes -
+ ResultFlag_ErrSet is the possible exception to that
+ */
bool hasResultFlag( int flag ){
return (resultFlags & flag) != 0;
}
- public:
+
DBClientCursor( DBConnector *_connector, const string &_ns, BSONObj _query, int _nToReturn,
- int _nToSkip, const BSONObj *_fieldsToReturn, int queryOptions ) :
+ int _nToSkip, const BSONObj *_fieldsToReturn, int queryOptions , int bs ) :
connector(_connector),
ns(_ns),
query(_query),
nToReturn(_nToReturn),
+ haveLimit( _nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
nToSkip(_nToSkip),
fieldsToReturn(_fieldsToReturn),
opts(queryOptions),
+ batchSize(bs),
m(new Message()),
cursorId(),
nReturned(),
@@ -271,6 +290,7 @@ namespace mongo {
connector(_connector),
ns(_ns),
nToReturn( _nToReturn ),
+ haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
opts( options ),
m(new Message()),
cursorId( _cursorId ),
@@ -290,14 +310,20 @@ namespace mongo {
void decouple() { _ownCursor = false; }
private:
+
+ int nextBatchSize();
+
DBConnector *connector;
string ns;
BSONObj query;
int nToReturn;
+ bool haveLimit;
int nToSkip;
const BSONObj *fieldsToReturn;
int opts;
+ int batchSize;
auto_ptr<Message> m;
+ stack< BSONObj > _putBack;
int resultFlags;
long long cursorId;
@@ -315,7 +341,7 @@ namespace mongo {
class DBClientInterface : boost::noncopyable {
public:
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
- const BSONObj *fieldsToReturn = 0, int queryOptions = 0) = 0;
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
@@ -343,7 +369,6 @@ namespace mongo {
Basically just invocations of connection.$cmd.findOne({...});
*/
class DBClientWithCommands : public DBClientInterface {
- bool isOk(const BSONObj&);
set<string> _seenIndexes;
public:
@@ -365,7 +390,7 @@ namespace mongo {
set.
@return true if the command returned "ok".
*/
- bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
+ virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
/** Authorize access to a particular database.
Authentication is separate for each database on the server -- you may authenticate for any
@@ -484,6 +509,7 @@ namespace mongo {
ProfileOff = 0,
ProfileSlow = 1, // log very slow (>100ms) operations
ProfileAll = 2
+
};
bool setDbProfilingLevel(const string &dbname, ProfilingLevel level, BSONObj *info = 0);
bool getDbProfilingLevel(const string &dbname, ProfilingLevel& level, BSONObj *info = 0);
@@ -500,11 +526,11 @@ namespace mongo {
generate a temporary collection and return its name.
returns a result object which contains:
- { result : <collection_name>,
- numObjects : <number_of_objects_scanned>,
- timeMillis : <job_time>,
- ok : <1_if_ok>,
- [, err : <errmsg_if_error>]
+ { result : <collection_name>,
+ numObjects : <number_of_objects_scanned>,
+ timeMillis : <job_time>,
+ ok : <1_if_ok>,
+ [, err : <errmsg_if_error>]
}
For example one might call:
@@ -638,6 +664,9 @@ namespace mongo {
return ns.substr( pos + 1 );
}
+ protected:
+ bool isOk(const BSONObj&);
+
};
/**
@@ -661,7 +690,7 @@ namespace mongo {
@throws AssertionException
*/
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
- const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
/** @param cursorId id of cursor to retrieve
@return an handle to a previously allocated cursor
@@ -694,6 +723,13 @@ namespace mongo {
virtual bool isFailed() const = 0;
+ static int countCommas( const string& s ){
+ int n = 0;
+ for ( unsigned i=0; i<s.size(); i++ )
+ if ( s[i] == ',' )
+ n++;
+ return n;
+ }
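+ // Host-string convention assumed by DBConnectionPool::get():
+ // 0 commas = single server, 1 comma = replica pair, 2 commas =
+ // three-server SyncClusterConnection.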
};
class DBClientPaired;
@@ -755,9 +791,9 @@ namespace mongo {
virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
- const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) {
checkConnection();
- return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions );
+ return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions , batchSize );
}
/**
@@ -788,7 +824,6 @@ namespace mongo {
return serverAddress;
}
- protected:
virtual bool call( Message &toSend, Message &response, bool assertOk = true );
virtual void say( Message &toSend );
virtual void sayPiggyBack( Message &toSend );
@@ -835,7 +870,7 @@ namespace mongo {
/** throws userassertion "no master found" */
virtual
auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
- const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
/** throws userassertion "no master found" */
virtual
diff --git a/client/examples/httpClientTest.cpp b/client/examples/httpClientTest.cpp
new file mode 100644
index 0000000..5d6c429
--- /dev/null
+++ b/client/examples/httpClientTest.cpp
@@ -0,0 +1,43 @@
+// httpClientTest.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "client/dbclient.h"
+#include "util/httpclient.h"
+
+using namespace mongo;
+
+int main( int argc, const char **argv ) {
+
+ int port = 27017;
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = atoi( argv[ 2 ] );
+ }
+ port += 1000;
+
+ stringstream ss;
+ ss << "http://localhost:" << port << "/";
+ string url = ss.str();
+
+ cout << "[" << url << "]" << endl;
+
+ HttpClient c;
+ assert( c.get( url ) == 200 );
+}
diff --git a/client/parallel.cpp b/client/parallel.cpp
index 449f436..bd29013 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -1,4 +1,20 @@
// parallel.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "stdafx.h"
#include "parallel.h"
@@ -10,13 +26,12 @@
namespace mongo {
// -------- ClusteredCursor -----------
-
+
ClusteredCursor::ClusteredCursor( QueryMessage& q ){
_ns = q.ns;
_query = q.query.copy();
_options = q.queryOptions;
- if ( q.fields.get() )
- _fields = q.fields->getSpec();
+ _fields = q.fields;
_done = false;
}
@@ -82,8 +97,8 @@ namespace mongo {
// -------- SerialServerClusteredCursor -----------
- SerialServerClusteredCursor::SerialServerClusteredCursor( set<ServerAndQuery> servers , QueryMessage& q , int sortOrder) : ClusteredCursor( q ){
- for ( set<ServerAndQuery>::iterator i = servers.begin(); i!=servers.end(); i++ )
+ SerialServerClusteredCursor::SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder) : ClusteredCursor( q ){
+ for ( set<ServerAndQuery>::const_iterator i = servers.begin(); i!=servers.end(); i++ )
_servers.push_back( *i );
if ( sortOrder > 0 )
@@ -119,14 +134,14 @@ namespace mongo {
// -------- ParallelSortClusteredCursor -----------
- ParallelSortClusteredCursor::ParallelSortClusteredCursor( set<ServerAndQuery> servers , QueryMessage& q ,
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q ,
const BSONObj& sortKey )
: ClusteredCursor( q ) , _servers( servers ){
_sortKey = sortKey.getOwned();
_init();
}
- ParallelSortClusteredCursor::ParallelSortClusteredCursor( set<ServerAndQuery> servers , const string& ns ,
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
const Query& q ,
int options , const BSONObj& fields )
: ClusteredCursor( ns , q.obj , options , fields ) , _servers( servers ){
diff --git a/client/parallel.h b/client/parallel.h
index 5a22624..88864ae 100644
--- a/client/parallel.h
+++ b/client/parallel.h
@@ -98,7 +98,7 @@ namespace mongo {
*/
class SerialServerClusteredCursor : public ClusteredCursor {
public:
- SerialServerClusteredCursor( set<ServerAndQuery> servers , QueryMessage& q , int sortOrder=0);
+ SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder=0);
virtual bool more();
virtual BSONObj next();
virtual string type() const { return "SerialServer"; }
@@ -116,8 +116,8 @@ namespace mongo {
*/
class ParallelSortClusteredCursor : public ClusteredCursor {
public:
- ParallelSortClusteredCursor( set<ServerAndQuery> servers , QueryMessage& q , const BSONObj& sortKey );
- ParallelSortClusteredCursor( set<ServerAndQuery> servers , const string& ns ,
+ ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , const BSONObj& sortKey );
+ ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
const Query& q , int options=0, const BSONObj& fields=BSONObj() );
virtual ~ParallelSortClusteredCursor();
virtual bool more();
diff --git a/client/syncclusterconnection.cpp b/client/syncclusterconnection.cpp
index b942709..0a8fc79 100644
--- a/client/syncclusterconnection.cpp
+++ b/client/syncclusterconnection.cpp
@@ -1,13 +1,31 @@
// syncclusterconnection.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "stdafx.h"
#include "syncclusterconnection.h"
+#include "../db/dbmessage.h"
// error codes 8000-8009
namespace mongo {
- SyncCluterConnection::SyncCluterConnection( string commaSeperated ){
+ SyncClusterConnection::SyncClusterConnection( string commaSeperated ){
+ _address = commaSeperated;
string::size_type idx;
while ( ( idx = commaSeperated.find( ',' ) ) != string::npos ){
string h = commaSeperated.substr( 0 , idx );
@@ -15,27 +33,32 @@ namespace mongo {
_connect( h );
}
_connect( commaSeperated );
- uassert( 8004 , "SyncCluterConnection needs 3 servers" , _conns.size() == 3 );
+ uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
}
- SyncCluterConnection::SyncCluterConnection( string a , string b , string c ){
+ SyncClusterConnection::SyncClusterConnection( string a , string b , string c ){
+ _address = a + "," + b + "," + c;
// connect to all even if not working
_connect( a );
_connect( b );
_connect( c );
}
- SyncCluterConnection::~SyncCluterConnection(){
+ SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev ){
+ assert(0);
+ }
+
+ SyncClusterConnection::~SyncClusterConnection(){
for ( size_t i=0; i<_conns.size(); i++ )
delete _conns[i];
_conns.clear();
}
- bool SyncCluterConnection::prepare( string& errmsg ){
+ bool SyncClusterConnection::prepare( string& errmsg ){
return fsync( errmsg );
}
- bool SyncCluterConnection::fsync( string& errmsg ){
+ bool SyncClusterConnection::fsync( string& errmsg ){
bool ok = true;
errmsg = "";
for ( size_t i=0; i<_conns.size(); i++ ){
@@ -55,7 +78,7 @@ namespace mongo {
return ok;
}
- void SyncCluterConnection::_checkLast(){
+ void SyncClusterConnection::_checkLast(){
vector<BSONObj> all;
vector<string> errors;
@@ -91,27 +114,59 @@ namespace mongo {
if ( ok )
return;
- throw UserException( 8001 , (string)"SyncCluterConnection write op failed: " + err.str() );
+ throw UserException( 8001 , (string)"SyncClusterConnection write op failed: " + err.str() );
}
- void SyncCluterConnection::_connect( string host ){
- log() << "SyncCluterConnection connecting to: " << host << endl;
+ void SyncClusterConnection::_connect( string host ){
+ log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
DBClientConnection * c = new DBClientConnection( true );
string errmsg;
if ( ! c->connect( host , errmsg ) )
- log() << "SyncCluterConnection connect fail to: " << host << " errmsg: " << errmsg << endl;
+ log() << "SyncClusterConnection connect fail to: " << host << " errmsg: " << errmsg << endl;
_conns.push_back( c );
}
- auto_ptr<DBClientCursor> SyncCluterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions){
+ auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
- uassert( 10021 , "$cmd not support yet in SyncCluterConnection::query" , ns.find( "$cmd" ) == string::npos );
+ if ( ns.find( ".$cmd" ) != string::npos ){
+ string cmdName = query.obj.firstElement().fieldName();
+
+ int lockType = 0;
+
+ map<string,int>::iterator i = _lockTypes.find( cmdName );
+ if ( i == _lockTypes.end() ){
+ BSONObj info;
+ uassert( 13053 , "help failed" , _commandOnActive( "admin" , BSON( cmdName << "1" << "help" << 1 ) , info ) );
+ lockType = info["lockType"].numberInt();
+ _lockTypes[cmdName] = lockType;
+ }
+ else {
+ lockType = i->second;
+ }
+
+ uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection: " + cmdName , lockType <= 0 );
+ }
+
+ return _queryOnActive( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
+ }
+ bool SyncClusterConnection::_commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options ){
+ auto_ptr<DBClientCursor> cursor = _queryOnActive( dbname + ".$cmd" , cmd , 1 , 0 , 0 , options , 0 );
+ if ( cursor->more() )
+ info = cursor->next().copy();
+ else
+ info = BSONObj();
+ return isOk( info );
+ }
+
+ auto_ptr<DBClientCursor> SyncClusterConnection::_queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
+
for ( size_t i=0; i<_conns.size(); i++ ){
try {
auto_ptr<DBClientCursor> cursor =
- _conns[i]->query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions );
+ _conns[i]->query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
if ( cursor.get() )
return cursor;
log() << "query failed to: " << _conns[i]->toString() << " no data" << endl;
@@ -123,16 +178,16 @@ namespace mongo {
throw UserException( 8002 , "all servers down!" );
}
- auto_ptr<DBClientCursor> SyncCluterConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ){
- uassert( 10022 , "SyncCluterConnection::getMore not supported yet" , 0);
+ auto_ptr<DBClientCursor> SyncClusterConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ){
+ uassert( 10022 , "SyncClusterConnection::getMore not supported yet" , 0);
auto_ptr<DBClientCursor> c;
return c;
}
- void SyncCluterConnection::insert( const string &ns, BSONObj obj ){
+ void SyncClusterConnection::insert( const string &ns, BSONObj obj ){
string errmsg;
if ( ! prepare( errmsg ) )
- throw UserException( 8003 , (string)"SyncCluterConnection::insert prepare failed: " + errmsg );
+ throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );
for ( size_t i=0; i<_conns.size(); i++ ){
_conns[i]->insert( ns , obj );
@@ -141,25 +196,60 @@ namespace mongo {
_checkLast();
}
- void SyncCluterConnection::insert( const string &ns, const vector< BSONObj >& v ){
- uassert( 10023 , "SyncCluterConnection bulk insert not implemented" , 0);
+ void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v ){
+ uassert( 10023 , "SyncClusterConnection bulk insert not implemented" , 0);
}
- void SyncCluterConnection::remove( const string &ns , Query query, bool justOne ){ assert(0); }
+ void SyncClusterConnection::remove( const string &ns , Query query, bool justOne ){
+ assert(0);
+ }
- void SyncCluterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ){ assert(0); }
+ void SyncClusterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ){
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 8005 , (string)"SyncClusterConnection::update prepare failed: " + errmsg );
- string SyncCluterConnection::toString(){
- stringstream ss;
- ss << "SyncCluterConnection [";
for ( size_t i=0; i<_conns.size(); i++ ){
- if ( i > 0 )
- ss << ",";
- ss << _conns[i]->toString();
+ _conns[i]->update( ns , query , obj , upsert , multi );
}
- ss << "]";
+
+ _checkLast();
+ }
+
+ string SyncClusterConnection::_toString() const {
+ stringstream ss;
+ ss << "SyncClusterConnection [" << _address << "]";
return ss.str();
}
+ bool SyncClusterConnection::call( Message &toSend, Message &response, bool assertOk ){
+ uassert( 8006 , "SyncClusterConnection::call can only be used directly for dbQuery" ,
+ toSend.operation() == dbQuery );
+
+ DbMessage d( toSend );
+ uassert( 8007 , "SyncClusterConnection::call can't handle $cmd" , strstr( d.getns(), "$cmd" ) == 0 );
+
+ for ( size_t i=0; i<_conns.size(); i++ ){
+ try {
+ bool ok = _conns[i]->call( toSend , response , assertOk );
+ if ( ok )
+ return ok;
+ log() << "call failed to: " << _conns[i]->toString() << " no data" << endl;
+ }
+ catch ( ... ){
+ log() << "call failed to: " << _conns[i]->toString() << " exception" << endl;
+ }
+ }
+ throw UserException( 8008 , "all servers down!" );
+ }
+
+ void SyncClusterConnection::say( Message &toSend ){
+ assert(0);
+ }
+
+ void SyncClusterConnection::sayPiggyBack( Message &toSend ){
+ assert(0);
+ }
+
}
diff --git a/client/syncclusterconnection.h b/client/syncclusterconnection.h
index c14a9bb..e3411e1 100644
--- a/client/syncclusterconnection.h
+++ b/client/syncclusterconnection.h
@@ -1,4 +1,20 @@
// syncclusterconnection.h
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "../stdafx.h"
#include "dbclient.h"
@@ -9,14 +25,14 @@ namespace mongo {
* this is a connection to a cluster of servers that operate as one
* for super high durability
*/
- class SyncCluterConnection : public DBClientWithCommands {
+ class SyncClusterConnection : public DBClientBase {
public:
/**
* @param commaSeperated should be 3 hosts comma seperated
*/
- SyncCluterConnection( string commaSeperated );
- SyncCluterConnection( string a , string b , string c );
- ~SyncCluterConnection();
+ SyncClusterConnection( string commaSeperated );
+ SyncClusterConnection( string a , string b , string c );
+ ~SyncClusterConnection();
/**
@@ -32,7 +48,7 @@ namespace mongo {
// --- from DBClientInterface
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions);
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn, int options );
@@ -44,13 +60,40 @@ namespace mongo {
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi );
- virtual string toString();
+ virtual string toString(){
+ return _toString();
+ }
+
+ virtual bool call( Message &toSend, Message &response, bool assertOk );
+ virtual void say( Message &toSend );
+ virtual void sayPiggyBack( Message &toSend );
+
+ virtual string getServerAddress() const { return _address; }
+
+ virtual bool isFailed() const {
+ return false;
+ }
+
private:
+ SyncClusterConnection( SyncClusterConnection& prev );
+
+ string _toString() const;
+
+ bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
+
+ auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
+
+ bool _isReadOnly( const string& name );
+
void _checkLast();
void _connect( string host );
+
+ string _address;
vector<DBClientConnection*> _conns;
+ map<string,int> _lockTypes;
};
diff --git a/db/background.h b/db/background.h
new file mode 100644
index 0000000..24ea1cb
--- /dev/null
+++ b/db/background.h
@@ -0,0 +1,56 @@
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* background.h
+
+ Concurrency coordination for administrative operations.
+*/
+
+#pragma once
+
+namespace mongo {
+
+    /* these are administrative operations / jobs that run in the background
+       for a namespace. only one is permitted per namespace at a time; while
+       one is in progress, other major NamespaceDetails manipulations (such
+       as dropping the ns or db) are disallowed even in the foreground and
+       must uassert instead.
+
+       it's assumed this is not a high-throughput path, so the implementation
+       here makes no special effort to be fast.
+ */
+ class BackgroundOperation : public boost::noncopyable {
+ public:
+ static bool inProgForDb(const char *db);
+ static bool inProgForNs(const char *ns);
+ static void assertNoBgOpInProgForDb(const char *db);
+ static void assertNoBgOpInProgForNs(const char *ns);
+ static void dump(stringstream&);
+
+ /* check for in progress before instantiating */
+ BackgroundOperation(const char *ns);
+
+ virtual ~BackgroundOperation();
+
+ private:
+ NamespaceString _ns;
+ static map<string, unsigned> dbsInProg;
+ static set<string> nsInProg;
+ };
+
+} // namespace mongo
+
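Since the header documents an RAII protocol, here is a brief usage sketch; the function names are illustrative, not from the patch:

    // bg op side: check for a conflicting op, then register for the duration
    void buildIndexInBackground(const char* ns) {
        // per the header comment, check before instantiating
        mongo::BackgroundOperation::assertNoBgOpInProgForNs(ns);
        mongo::BackgroundOperation bgop(ns);   // registers ns while alive
        // ... long-running work (e.g. a background index build) ...
    }                                          // ~BackgroundOperation deregisters ns

    // foreground side: destructive ops must check first and uassert on conflict
    void dropCollectionSafely(const char* ns) {
        mongo::BackgroundOperation::assertNoBgOpInProgForNs(ns);
        // ... proceed with the drop ...
    }
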
diff --git a/db/btree.cpp b/db/btree.cpp
index 8b910f5..18f9e76 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -25,6 +25,7 @@
#include "client.h"
#include "dbhelpers.h"
#include "curop.h"
+#include "stats/counters.h"
namespace mongo {
@@ -41,6 +42,11 @@ namespace mongo {
const int split_debug = 0;
const int insert_debug = 0;
+ static void alreadyInIndex() {
+ // we don't use massert() here as that does logging and this is 'benign' - see catches in _indexRecord()
+ throw MsgAssertionException(10287, "btree: key+recloc already in index");
+ }
+
/* BucketBasics --------------------------------------------------- */
inline void BucketBasics::modified(const DiskLoc& thisLoc) {
@@ -356,9 +362,36 @@ namespace mongo {
return false;
}
+    /* @param self - don't complain if it is this record (self) that is already in the index.
+       @return true if there is a duplicate.
+ */
+ bool BtreeBucket::wouldCreateDup(
+ const IndexDetails& idx, DiskLoc thisLoc,
+ const BSONObj& key, BSONObj order,
+ DiskLoc self)
+ {
+ int pos;
+ bool found;
+ DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
+
+ while ( !b.isNull() ) {
+ // we skip unused keys
+ BtreeBucket *bucket = b.btree();
+ _KeyNode& kn = bucket->k(pos);
+ if ( kn.isUsed() ) {
+ if( bucket->keyAt(pos).woEqual(key) )
+ return kn.recordLoc != self;
+ break;
+ }
+ b = bucket->advance(b, pos, 1, "BtreeBucket::dupCheck");
+ }
+
+ return false;
+ }
+
string BtreeBucket::dupKeyError( const IndexDetails& idx , const BSONObj& key ){
stringstream ss;
- ss << "E11000 duplicate key error";
+ ss << "E11000 duplicate key error ";
ss << "index: " << idx.indexNamespace() << " ";
ss << "dup key: " << key;
return ss.str();
@@ -391,6 +424,9 @@ namespace mongo {
}
}
#endif
+
+ globalIndexCounters.btree( (char*)this );
+
/* binary search for this key */
bool dupsChecked = false;
int l=0;
@@ -407,12 +443,19 @@ namespace mongo {
// coding effort in here to make this particularly fast
if( !dupsChecked ) {
dupsChecked = true;
- if( idx.head.btree()->exists(idx, idx.head, key, order) )
- uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
+ if( idx.head.btree()->exists(idx, idx.head, key, order) ) {
+ if( idx.head.btree()->wouldCreateDup(idx, idx.head, key, order, recordLoc) )
+ uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
+ else
+ alreadyInIndex();
+ }
}
}
- else
+ else {
+ if( M.recordLoc == recordLoc )
+ alreadyInIndex();
uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
+ }
}
// dup keys allowed. use recordLoc as if it is part of the key
@@ -444,7 +487,7 @@ namespace mongo {
}
void BtreeBucket::delBucket(const DiskLoc& thisLoc, IndexDetails& id) {
- ClientCursor::informAboutToDeleteBucket(thisLoc);
+ ClientCursor::informAboutToDeleteBucket(thisLoc); // slow...
assert( !isHead() );
BtreeBucket *p = parent.btreemod();
@@ -466,6 +509,10 @@ namespace mongo {
assert(false);
}
found:
+ deallocBucket( thisLoc );
+ }
+
+ void BtreeBucket::deallocBucket(const DiskLoc &thisLoc) {
#if 1
/* as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
it (meaning it is ineligible for reuse).
@@ -807,13 +854,15 @@ found:
return 0;
}
- out() << "_insert(): key already exists in index\n";
- out() << " " << idx.indexNamespace().c_str() << " thisLoc:" << thisLoc.toString() << '\n';
- out() << " " << key.toString() << '\n';
- out() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
- out() << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
- out() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
- massert( 10287 , "btree: key+recloc already in index", false);
+ DEV {
+ out() << "_insert(): key already exists in index (ok for background:true)\n";
+ out() << " " << idx.indexNamespace().c_str() << " thisLoc:" << thisLoc.toString() << '\n';
+ out() << " " << key.toString() << '\n';
+ out() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
+ out() << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
+ out() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
+ }
+ alreadyInIndex();
}
DEBUGGING out() << "TEMP: key: " << key.toString() << endl;
@@ -926,12 +975,11 @@ namespace mongo {
b->k(1).setUnused();
b->dumpTree(id.head, order);
- cout << "---\n";
b->bt_insert(id.head, A, key, order, false, id);
b->dumpTree(id.head, order);
- cout << "---\n";*/
+ */
        // this should assert.  does it? (it might assert "accidentally" though; not asserting proves a problem, while asserting proves nothing)
b->bt_insert(id.head, C, key, order, false, id);
@@ -1004,20 +1052,27 @@ namespace mongo {
BSONObj k;
DiskLoc r;
x->popBack(r,k);
- if( x->n == 0 )
- log() << "warning: empty bucket on BtreeBuild " << k.toString() << endl;
+ bool keepX = ( x->n != 0 );
+ DiskLoc keepLoc = keepX ? xloc : x->nextChild;
- if ( ! up->_pushBack(r, k, order, xloc) ){
+ if ( ! up->_pushBack(r, k, order, keepLoc) ){
// current bucket full
DiskLoc n = BtreeBucket::addBucket(idx);
up->tempNext() = n;
upLoc = n;
up = upLoc.btreemod();
- up->pushBack(r, k, order, xloc);
+ up->pushBack(r, k, order, keepLoc);
}
- xloc = x->tempNext(); /* get next in chain at current level */
- x->parent = upLoc;
+ DiskLoc nextLoc = x->tempNext(); /* get next in chain at current level */
+ if ( keepX ) {
+ x->parent = upLoc;
+ } else {
+ if ( !x->nextChild.isNull() )
+ x->nextChild.btreemod()->parent = upLoc;
+ x->deallocBucket( xloc );
+ }
+ xloc = nextLoc;
}
loc = upStart;
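
The dup check added above only reports a duplicate when an equal key maps to a different record. The same idea on a plain sorted container, as an illustration only (the real btree code additionally skips unused keys):

    #include <map>
    #include <string>

    typedef long RecordLoc;

    bool wouldCreateDup(const std::multimap<std::string, RecordLoc>& index,
                        const std::string& key, RecordLoc self) {
        typedef std::multimap<std::string, RecordLoc>::const_iterator It;
        std::pair<It, It> range = index.equal_range(key);
        for (It it = range.first; it != range.second; ++it) {
            if (it->second != self)
                return true;   // same key, different record: a real duplicate
            // same key, same record: the benign "already in index" case
        }
        return false;
    }
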
diff --git a/db/btree.h b/db/btree.h
index 2c2ab81..b2e9ba9 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -20,7 +20,7 @@
#include "../stdafx.h"
#include "jsobj.h"
-#include "storage.h"
+#include "diskloc.h"
#include "pdfile.h"
namespace mongo {
@@ -28,8 +28,8 @@ namespace mongo {
#pragma pack(1)
struct _KeyNode {
- DiskLoc prevChildBucket;
- DiskLoc recordLoc;
+ DiskLoc prevChildBucket; // the lchild
+ DiskLoc recordLoc; // location of the record associated with the key
short keyDataOfs() const {
return (short) _kdo;
}
@@ -53,10 +53,10 @@ namespace mongo {
*/
recordLoc.GETOFS() |= 1;
}
- int isUnused() {
+ int isUnused() const {
return recordLoc.getOfs() & 1;
}
- int isUsed() {
+ int isUsed() const {
return !isUnused();
}
};
@@ -85,13 +85,18 @@ namespace mongo {
bool isHead() { return parent.isNull(); }
void assertValid(const BSONObj &order, bool force = false);
int fullValidate(const DiskLoc& thisLoc, const BSONObj &order); /* traverses everything */
- protected:
- void modified(const DiskLoc& thisLoc);
+
KeyNode keyNode(int i) const {
- assert( i < n );
+ if ( i >= n ){
+ massert( 13000 , (string)"invalid keyNode: " + BSON( "i" << i << "n" << n ).jsonString() , i < n );
+ }
return KeyNode(*this, k(i));
}
+ protected:
+
+ void modified(const DiskLoc& thisLoc);
+
char * dataAt(short ofs) {
return data + ofs;
}
@@ -151,6 +156,10 @@ namespace mongo {
ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
return ss.str();
}
+
+ bool isUsed( int i ) const {
+ return k(i).isUsed();
+ }
protected:
void _shape(int level, stringstream&);
@@ -184,7 +193,13 @@ namespace mongo {
*/
bool exists(const IndexDetails& idx, DiskLoc thisLoc, const BSONObj& key, BSONObj order);
+ bool wouldCreateDup(
+ const IndexDetails& idx, DiskLoc thisLoc,
+ const BSONObj& key, BSONObj order,
+ DiskLoc self);
+
static DiskLoc addBucket(IndexDetails&); /* start a new index off, empty */
+ void deallocBucket(const DiskLoc &thisLoc); // clear bucket memory, placeholder for deallocation
static void renameIndexNamespace(const char *oldNs, const char *newNs);
@@ -256,6 +271,7 @@ namespace mongo {
virtual void noteLocation(); // updates keyAtKeyOfs...
virtual void checkLocation();
+ virtual bool supportGetMore() { return true; }
/* used for multikey index traversal to avoid sending back dups. see Matcher::matches().
if a multikey index traversal:
@@ -318,15 +334,20 @@ namespace mongo {
return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable();
}
- virtual BSONObj prettyStartKey() const {
- return prettyKey( startKey );
- }
- virtual BSONObj prettyEndKey() const {
- return prettyKey( endKey );
+ virtual BSONObj prettyIndexBounds() const {
+ BSONArrayBuilder ba;
+ if ( bounds_.size() == 0 ) {
+ ba << BSON_ARRAY( prettyKey( startKey ) << prettyKey( endKey ) );
+ } else {
+ for( BoundList::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i ) {
+ ba << BSON_ARRAY( prettyKey( i->first ) << prettyKey( i->second ) );
+ }
+ }
+ return ba.arr();
}
void forgetEndKey() { endKey = BSONObj(); }
-
+
private:
/* Our btrees may (rarely) have "unused" keys when items are deleted.
Skip past them.
@@ -362,6 +383,7 @@ namespace mongo {
DiskLoc locAtKeyOfs;
BoundList bounds_;
unsigned boundIndex_;
+ const IndexSpec& _spec;
};
#pragma pack()
@@ -369,6 +391,9 @@ namespace mongo {
inline bool IndexDetails::hasKey(const BSONObj& key) {
return head.btree()->exists(*this, head, key, keyPattern());
}
+ inline bool IndexDetails::wouldCreateDup(const BSONObj& key, DiskLoc self) {
+ return head.btree()->wouldCreateDup(*this, head, key, keyPattern(), self);
+ }
/* build btree from the bottom up */
/* _ TODO dropDups */
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index bb477d6..ab15c44 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -36,7 +36,8 @@ namespace mongo {
indexDetails( _id ),
order( _id.keyPattern() ),
direction( _direction ),
- boundIndex_()
+ boundIndex_(),
+ _spec( _id.getSpec() )
{
audit();
init();
@@ -51,7 +52,8 @@ namespace mongo {
order( _id.keyPattern() ),
direction( _direction ),
bounds_( _bounds ),
- boundIndex_()
+ boundIndex_(),
+ _spec( _id.getSpec() )
{
assert( !bounds_.empty() );
audit();
@@ -74,6 +76,10 @@ namespace mongo {
}
void BtreeCursor::init() {
+ if ( _spec.getType() ){
+ startKey = _spec.getType()->fixKey( startKey );
+ endKey = _spec.getType()->fixKey( endKey );
+ }
bool found;
bucket = indexDetails.head.btree()->
locate(indexDetails, indexDetails.head, startKey, order, keyOfs, found, direction > 0 ? minDiskLoc : maxDiskLoc, direction);
@@ -88,7 +94,7 @@ namespace mongo {
init();
} while ( !ok() && ++boundIndex_ < bounds_.size() );
}
-
+
/* skip unused keys. */
void BtreeCursor::skipUnusedKeys() {
int u = 0;
diff --git a/db/client.cpp b/db/client.cpp
index 68a0c9e..dc82a25 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -1,5 +1,5 @@
-// client.cpp
-
+// client.cpp
+
/**
* Copyright (C) 2009 10gen Inc.
*
@@ -25,40 +25,41 @@
#include "client.h"
#include "curop.h"
#include "json.h"
-
+#include "security.h"
+
namespace mongo {
- boost::mutex Client::clientsMutex;
+ mongo::mutex Client::clientsMutex;
set<Client*> Client::clients; // always be in clientsMutex when manipulating this
boost::thread_specific_ptr<Client> currentClient;
Client::Client(const char *desc) :
- _curOp(new CurOp()),
- _database(0), _ns("")/*, _nsstr("")*/
- ,_shutdown(false),
+ _context(0),
+ _shutdown(false),
_desc(desc),
_god(0)
- {
- ai = new AuthenticationInfo();
- boostlock bl(clientsMutex);
+ {
+ _curOp = new CurOp( this );
+ scoped_lock bl(clientsMutex);
clients.insert(this);
}
Client::~Client() {
delete _curOp;
- delete ai;
- ai = 0;
_god = 0;
- if ( !_shutdown ) {
- cout << "ERROR: Client::shutdown not called!" << endl;
- }
+
+ if ( _context )
+ cout << "ERROR: Client::~Client _context should be NULL: " << _desc << endl;
+ if ( !_shutdown )
+ cout << "ERROR: Client::shutdown not called: " << _desc << endl;
}
bool Client::shutdown(){
_shutdown = true;
-
+ if ( inShutdown() )
+ return false;
{
- boostlock bl(clientsMutex);
+ scoped_lock bl(clientsMutex);
clients.erase(this);
}
@@ -68,8 +69,10 @@ namespace mongo {
didAnything = true;
for ( list<string>::iterator i = _tempCollections.begin(); i!=_tempCollections.end(); i++ ){
string ns = *i;
+ Top::global.collectionDropped( ns );
+
dblock l;
- setClient( ns.c_str() );
+ Client::Context ctx( ns );
if ( ! nsdetails( ns.c_str() ) )
continue;
try {
@@ -88,12 +91,158 @@ namespace mongo {
}
BSONObj CurOp::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}");
- WrappingInt CurOp::_nextOpNum;
+ AtomicUInt CurOp::_nextOpNum;
- Client::Context::Context( string ns , Database * db )
- : _client( currentClient.get() ) {
+ Client::Context::Context( string ns , Database * db, bool doauth )
+ : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
+ _path( dbpath ) , _lock(0) , _justCreated(false) {
assert( db && db->isOk() );
- _client->setns( ns.c_str() , db );
+ _ns = ns;
+ _db = db;
+ _client->_context = this;
+ if ( doauth )
+ _auth();
+ }
+
+ void Client::Context::_finishInit( bool doauth ){
+ int lockState = dbMutex.getState();
+ assert( lockState );
+
+ _db = dbHolder.get( _ns , _path );
+ if ( _db ){
+ _justCreated = false;
+ }
+ else if ( dbMutex.getState() > 0 ){
+ // already in a write lock
+ _db = dbHolder.getOrCreate( _ns , _path , _justCreated );
+ assert( _db );
+ }
+ else if ( dbMutex.getState() < -1 ){
+ // nested read lock :(
+ assert( _lock );
+ _lock->releaseAndWriteLock();
+ _db = dbHolder.getOrCreate( _ns , _path , _justCreated );
+ assert( _db );
+ }
+ else {
+            // we have a read lock, but need a write lock for a bit
+            // since we're going to create the DB object.
+            // to do that, we unlock and then acquire a write lock;
+            // that way, if this is the first query and it's long, it doesn't block the db.
+            // we just have to check that the db wasn't closed while we were unlocked.
+ for ( int x=0; x<2; x++ ){
+ {
+ dbtemprelease unlock;
+ writelock lk( _ns );
+ dbHolder.getOrCreate( _ns , _path , _justCreated );
+ }
+
+ _db = dbHolder.get( _ns , _path );
+
+ if ( _db )
+ break;
+
+ log() << "db was closed on us right after we opened it: " << _ns << endl;
+ }
+
+ uassert( 13005 , "can't create db, keeps getting closed" , _db );
+ }
+
+ _client->_context = this;
+ _client->_curOp->enter( this );
+ if ( doauth )
+ _auth( lockState );
+ }
+
+ void Client::Context::_auth( int lockState ){
+ if ( _client->_ai.isAuthorizedForLock( _db->name , lockState ) )
+ return;
+
+ // before we assert, do a little cleanup
+ _client->_context = _oldContext; // note: _oldContext may be null
+
+ stringstream ss;
+ ss << "unauthorized for db [" << _db->name << "] lock type: " << lockState << endl;
+ massert( 10057 , ss.str() , 0 );
+ }
+
+ Client::Context::~Context() {
+ DEV assert( _client == currentClient.get() );
+ _client->_curOp->leave( this );
+ _client->_context = _oldContext; // note: _oldContext may be null
+ }
+
+ string Client::toString() const {
+ stringstream ss;
+ if ( _curOp )
+ ss << _curOp->infoNoauth().jsonString();
+ return ss.str();
+ }
+
+ string sayClientState(){
+ Client* c = currentClient.get();
+ if ( ! c )
+ return "no client";
+ return c->toString();
+ }
+
+ void curopWaitingForLock( int type ){
+ Client * c = currentClient.get();
+ assert( c );
+ CurOp * co = c->curop();
+ if ( co ){
+ co->waitingForLock( type );
+ }
+ }
+ void curopGotLock(){
+ Client * c = currentClient.get();
+ assert(c);
+ CurOp * co = c->curop();
+ if ( co ){
+ co->gotLock();
+ }
+ }
+
+ BSONObj CurOp::infoNoauth() {
+ BSONObjBuilder b;
+ b.append("opid", _opNum);
+ bool a = _active && _start;
+ b.append("active", a);
+ if ( _lockType )
+ b.append("lockType" , _lockType > 0 ? "write" : "read" );
+ b.append("waitingForLock" , _waitingForLock );
+
+ if( a ){
+ b.append("secs_running", elapsedSeconds() );
+ }
+
+ b.append( "op" , opToString( _op ) );
+
+ b.append("ns", _ns);
+
+ if( haveQuery() ) {
+ b.append("query", query());
+ }
+ // b.append("inLock", ??
+ stringstream clientStr;
+ clientStr << inet_ntoa( _remote.sin_addr ) << ":" << ntohs( _remote.sin_port );
+ b.append("client", clientStr.str());
+
+ if ( _client )
+ b.append( "desc" , _client->desc() );
+
+ if ( ! _message.empty() ){
+ if ( _progressMeter.isActive() ){
+ StringBuilder buf(128);
+ buf << _message << " " << _progressMeter.toString();
+ b.append( "msg" , buf.str() );
+ }
+ else {
+ b.append( "msg" , _message );
+ }
+ }
+
+ return b.obj();
}
}
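
A sketch of the retry-under-write-lock pattern that Context::_finishInit uses above, assuming the codebase's dbtemprelease/writelock guards; openDb() and lookupDb() are hypothetical stand-ins for the dbHolder calls:

    Database* openWithRetry(const string& ns) {
        Database* db = 0;
        for (int attempt = 0; attempt < 2 && !db; attempt++) {
            {
                dbtemprelease unlock;   // give up our read lock
                writelock lk(ns);       // write lock lets us create the Database
                openDb(ns);             // hypothetical: dbHolder.getOrCreate(...)
            }                           // write lock released; read lock restored
            db = lookupDb(ns);          // hypothetical: dbHolder.get(...)
            // if null, the db was closed in the window with no lock held; retry
        }
        uassert(13005, "can't create db, keeps getting closed", db);
        return db;
    }
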
diff --git a/db/client.h b/db/client.h
index 99092ca..ab43509 100644
--- a/db/client.h
+++ b/db/client.h
@@ -1,5 +1,5 @@
-// client.h
-
+// client.h
+
/**
* Copyright (C) 2008 10gen Inc.
*
@@ -25,9 +25,10 @@
#pragma once
#include "../stdafx.h"
+#include "security.h"
#include "namespace.h"
#include "lasterror.h"
-#include "../util/top.h"
+#include "stats/top.h"
namespace mongo {
@@ -39,12 +40,9 @@ namespace mongo {
extern boost::thread_specific_ptr<Client> currentClient;
- bool setClient(const char *ns, const string& path=dbpath, mongolock *lock = 0);
-
-
class Client : boost::noncopyable {
public:
- static boost::mutex clientsMutex;
+ static mongo::mutex clientsMutex;
static set<Client*> clients; // always be in clientsMutex when manipulating this
class GodScope {
@@ -57,71 +55,125 @@ namespace mongo {
        /* Set the database we want to use, then restore it when we finish (go out of scope).
           Note this is also helpful if an exception happens, as the state is fixed up.
*/
- class Context {
+ class Context : boost::noncopyable{
Client * _client;
- Database * _olddb;
- string _oldns;
+ Context * _oldContext;
+
+ string _path;
+ mongolock * _lock;
+ bool _justCreated;
+
+ string _ns;
+ Database * _db;
+
+ /**
+ * at this point _client, _oldContext and _ns have to be set
+ * _db should not have been touched
+ * this will set _db and create if needed
+ * will also set _client->_context to this
+ */
+ void _finishInit( bool doauth=true);
+
+ void _auth( int lockState = dbMutex.getState() );
public:
- Context(const char *ns)
- : _client( currentClient.get() ) {
- _olddb = _client->_database;
- _oldns = _client->_ns;
- setClient(ns);
- }
- Context(string ns)
- : _client( currentClient.get() ){
- _olddb = _client->_database;
- _oldns = _client->_ns;
- setClient(ns.c_str());
+ Context(const string& ns, string path=dbpath, mongolock * lock = 0 , bool doauth=true )
+ : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
+ _path( path ) , _lock( lock ) ,
+ _ns( ns ){
+ _finishInit( doauth );
}
/* this version saves the context but doesn't yet set the new one: */
- Context()
- : _client( currentClient.get() ) {
- _olddb = _client->database();
- _oldns = _client->ns();
+ Context()
+ : _client( currentClient.get() ) , _oldContext( _client->_context ),
+ _path( dbpath ) , _lock(0) , _justCreated(false){
+ _client->_context = this;
+ clear();
}
/**
             * if you are doing this after allowing a write, there could be a race condition
             * if someone closes that db.  this checks that the DB is still valid
*/
- Context( string ns , Database * db );
+ Context( string ns , Database * db, bool doauth=true );
+
+ ~Context();
+
+ Client* getClient() const { return _client; }
+
+ Database* db() const {
+ return _db;
+ }
- ~Context() {
- DEV assert( _client == currentClient.get() );
- _client->setns( _oldns.c_str(), _olddb );
+ const char * ns() const {
+ return _ns.c_str();
+ }
+
+ bool justCreated() const {
+ return _justCreated;
}
- };
+ bool equals( const string& ns , const string& path=dbpath ) const {
+ return _ns == ns && _path == path;
+ }
+
+ bool inDB( const string& db , const string& path=dbpath ) const {
+ if ( _path != path )
+ return false;
+
+ if ( db == _ns )
+ return true;
+
+ string::size_type idx = _ns.find( db );
+ if ( idx != 0 )
+ return false;
+
+ return _ns[db.size()] == '.';
+ }
+ void clear(){
+ _ns = "";
+ _db = 0;
+ }
+
+ /**
+             * call before unlocking, to clear any non-thread-safe state
+ */
+ void unlocked(){
+ _db = 0;
+ }
+
+ /**
+             * call after going back into the lock; re-establishes non-thread-safe state
+ */
+ void relocked(){
+ _finishInit();
+ }
+
+ friend class CurOp;
+ };
+
private:
- CurOp * const _curOp;
- Database *_database;
- Namespace _ns;
- //NamespaceString _nsstr;
+ CurOp * _curOp;
+ Context * _context;
bool _shutdown;
list<string> _tempCollections;
const char *_desc;
bool _god;
+ AuthenticationInfo _ai;
+
public:
- AuthenticationInfo *ai;
- Top top;
+
+ AuthenticationInfo * getAuthenticationInfo(){ return &_ai; }
+ bool isAdmin() { return _ai.isAuthorized( "admin" ); }
CurOp* curop() { return _curOp; }
- Database* database() {
- return _database;
- }
- const char *ns() { return _ns.buf; }
-
- void setns(const char *ns, Database *db) {
- _database = db;
- _ns = ns;
- //_nsstr = ns;
- }
- void clearns() { setns("", 0); }
-
+
+ Context* getContext(){ return _context; }
+ Database* database() { return _context ? _context->db() : 0; }
+ const char *ns() { return _context->ns(); }
+
Client(const char *desc);
~Client();
@@ -143,6 +195,10 @@ namespace mongo {
bool shutdown();
bool isGod() const { return _god; }
+
+ friend class CurOp;
+
+ string toString() const;
};
inline Client& cc() {
@@ -182,12 +238,15 @@ namespace mongo {
dbMutex.unlock_shared();
dbMutex.lock();
- /* this is defensive; as we were unlocked for a moment above,
- the Database object we reference could have been deleted:
- */
- cc().clearns();
+ if ( cc().getContext() )
+ cc().getContext()->unlocked();
}
}
-
+
+ string sayClientState();
+
+ inline bool haveClient(){
+        return currentClient.get() != 0;
+ }
};
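
The inDB() matching rule above treats a database name as matching a context namespace when it is the whole ns or a "db." prefix of it. An equivalent standalone check, for illustration:

    #include <cassert>
    #include <string>

    bool inDB(const std::string& ns, const std::string& db) {
        if (db == ns) return true;                 // ns is exactly the db name
        if (ns.compare(0, db.size(), db) != 0)     // must start with db
            return false;
        return ns.size() > db.size() && ns[db.size()] == '.';
    }

    void checkInDB() {
        assert(inDB("test.foo", "test"));
        assert(!inDB("testing.foo", "test"));      // "test" is a prefix but not the db
    }
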
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index 0de0b2e..be0bd2f 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -36,7 +36,7 @@ namespace mongo {
boost::recursive_mutex ClientCursor::ccmutex;
unsigned ClientCursor::byLocSize() {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
return byLoc.size();
}
@@ -63,7 +63,7 @@ namespace mongo {
/* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
works fine as the prefix will end with '.'. however, when used with drop and
- deleteIndexes, this could take out cursors that belong to something else -- if you
+ dropIndexes, this could take out cursors that belong to something else -- if you
drop "foo", currently, this will kill cursors for "foobar".
*/
void ClientCursor::invalidate(const char *nsPrefix) {
@@ -73,7 +73,7 @@ namespace mongo {
assert( len > 0 && strchr(nsPrefix, '.') );
{
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
for ( CCByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ++i ) {
ClientCursor *cc = i->second;
@@ -88,7 +88,7 @@ namespace mongo {
/* called every 4 seconds. millis is amount of idle time passed since the last call -- could be zero */
void ClientCursor::idleTimeReport(unsigned millis) {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
for ( CCByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ) {
CCByLoc::iterator j = i;
i++;
@@ -104,7 +104,7 @@ namespace mongo {
note this is potentially slow
*/
void ClientCursor::informAboutToDeleteBucket(const DiskLoc& b) {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
RARELY if ( byLoc.size() > 70 ) {
log() << "perf warning: byLoc.size=" << byLoc.size() << " in aboutToDeleteBucket\n";
}
@@ -117,7 +117,7 @@ namespace mongo {
/* must call this on a delete so we clean up the cursors. */
void ClientCursor::aboutToDelete(const DiskLoc& dl) {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
CCByLoc::iterator j = byLoc.lower_bound(dl);
CCByLoc::iterator stop = byLoc.upper_bound(dl);
@@ -170,7 +170,7 @@ namespace mongo {
assert( pos != -2 );
{
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
setLastLoc_inlock( DiskLoc() ); // removes us from bylocation multimap
clientCursorsById.erase(cursorid);
@@ -193,7 +193,7 @@ namespace mongo {
return;
}
{
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
setLastLoc_inlock(cl);
c->noteLocation();
}
@@ -217,7 +217,7 @@ namespace mongo {
static bool inEmpty = false;
if( test && !inEmpty ) {
inEmpty = true;
- log() << "TEST: manipulate collection during remove" << endl;
+ log() << "TEST: manipulate collection during cc:yield" << endl;
if( test == 1 )
Helpers::emptyCollection(ns.c_str());
else if( test == 2 ) {
@@ -267,8 +267,9 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << " example: { cursorInfo : 1 }";
}
+ virtual LockType locktype(){ return NONE; }
bool run(const char *dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
- recursive_boostlock lock(ClientCursor::ccmutex);
+ recursive_scoped_lock lock(ClientCursor::ccmutex);
result.append("byLocation_size", unsigned( ClientCursor::byLoc.size() ) );
result.append("clientCursors_size", unsigned( ClientCursor::clientCursorsById.size() ) );
return true;
diff --git a/db/clientcursor.h b/db/clientcursor.h
index 03f20e9..42919e3 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -28,7 +28,7 @@
#include "cursor.h"
#include "jsobj.h"
#include "../util/message.h"
-#include "storage.h"
+#include "diskloc.h"
#include "dbhelpers.h"
#include "matcher.h"
@@ -83,7 +83,7 @@ namespace mongo {
_c = 0;
}
Pointer(long long cursorid) {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
_c = ClientCursor::find_inlock(cursorid, true);
if( _c ) {
if( _c->_pinValue >= 100 ) {
@@ -105,8 +105,15 @@ namespace mongo {
int pos; // # objects into the cursor so far
BSONObj query;
- ClientCursor() : _idleAgeMillis(0), _pinValue(0), _doingDeletes(false), pos(0) {
- recursive_boostlock lock(ccmutex);
+ ClientCursor(auto_ptr<Cursor>& _c, const char *_ns, bool okToTimeout) :
+ _idleAgeMillis(0), _pinValue(0),
+ _doingDeletes(false),
+ ns(_ns), c(_c),
+ pos(0)
+ {
+ if( !okToTimeout )
+ noTimeout();
+ recursive_scoped_lock lock(ccmutex);
cursorid = allocCursorId_inlock();
clientCursorsById.insert( make_pair(cursorid, this) );
}
@@ -116,11 +123,11 @@ namespace mongo {
return _lastLoc;
}
- auto_ptr< FieldMatcher > filter; // which fields query wants returned
+ shared_ptr< FieldMatcher > fields; // which fields query wants returned
Message originalMessage; // this is effectively an auto ptr for data the matcher points to
/* Get rid of cursors for namespaces that begin with nsprefix.
- Used by drop, deleteIndexes, dropDatabase.
+ Used by drop, dropIndexes, dropDatabase.
*/
static void invalidate(const char *nsPrefix);
@@ -130,7 +137,8 @@ namespace mongo {
* we don't do herein as this->matcher (above) is only initialized for true queries/getmore.
* (ie not set for remote/update)
* @return if the cursor is still valid.
- * if false is returned, then this ClientCursor should be considered deleted
+ * if false is returned, then this ClientCursor should be considered deleted -
+ * in fact, the whole database could be gone.
*/
bool yield();
private:
@@ -147,16 +155,16 @@ namespace mongo {
}
public:
static ClientCursor* find(CursorId id, bool warn = true) {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
ClientCursor *c = find_inlock(id, warn);
// if this asserts, your code was not thread safe - you either need to set no timeout
// for the cursor or keep a ClientCursor::Pointer in scope for it.
- massert( 12521, "internal error: use of an unlocked ClientCursor", c->_pinValue );
+ massert( 12521, "internal error: use of an unlocked ClientCursor", c == 0 || c->_pinValue );
return c;
}
static bool erase(CursorId id) {
- recursive_boostlock lock(ccmutex);
+ recursive_scoped_lock lock(ccmutex);
ClientCursor *cc = find_inlock(id);
if ( cc ) {
assert( cc->_pinValue < 100 ); // you can't still have an active ClientCursor::Pointer
@@ -195,13 +203,13 @@ namespace mongo {
}
static void idleTimeReport(unsigned millis);
-
+private:
    // cursors normally time out after an inactivity period to prevent excess memory use
// setting this prevents timeout of the cursor in question.
void noTimeout() {
_pinValue++;
}
-
+public:
void setDoingDeletes( bool doingDeletes ){
_doingDeletes = doingDeletes;
}
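
A sketch of the pinning protocol the asserts above enforce: hold a ClientCursor::Pointer (or mark the cursor noTimeout) before touching a cursor by id. The function is illustrative:

    void useCursor(mongo::CursorId cursorid) {
        mongo::ClientCursor::Pointer pin(cursorid);   // pins the cursor so erase()/timeout can't delete it
        mongo::ClientCursor* cc = mongo::ClientCursor::find(cursorid);
        if (!cc)
            return;                                   // cursor already gone
        // ... safe to advance/read the cursor while pinned ...
    }                                                 // ~Pointer unpins
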
diff --git a/db/cloner.cpp b/db/cloner.cpp
index 862f37c..d300721 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -46,6 +46,7 @@ namespace mongo {
snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
for example repairDatabase need not use it.
*/
+ void setConnection( DBClientWithCommands *c ) { conn.reset( c ); }
bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot);
bool startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, string& errmsg, bool logForRepl, bool copyIndexes, int logSizeMb, long long &cursorId );
bool finishCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, long long cursorId, string &errmsg );
@@ -97,11 +98,11 @@ namespace mongo {
list<BSONObj> storedForLater;
- assert( c.get() );
+ massert( 13055 , "socket error in Cloner:copy" , c.get() );
long long n = 0;
time_t saveLast = time( 0 );
while ( 1 ) {
- {
+ if( !c->moreInCurrentBatch() || n % 128 == 127 /*yield some*/ ) {
dbtemprelease r;
if ( !c->more() )
break;
@@ -111,7 +112,7 @@ namespace mongo {
/* assure object is valid. note this will slow us down a little. */
if ( !tmp.valid() ) {
stringstream ss;
- ss << "skipping corrupt object from " << from_collection;
+ ss << "Cloner: skipping corrupt object from " << from_collection;
BSONElement e = tmp.firstElement();
try {
e.validate();
@@ -191,7 +192,9 @@ namespace mongo {
auto_ptr<DBClientCursor> c;
{
- if ( !masterSameProcess ) {
+ if ( conn.get() ) {
+ // nothing to do
+ } else if ( !masterSameProcess ) {
auto_ptr< DBClientConnection > c( new DBClientConnection() );
if ( !c->connect( masterHost, errmsg ) )
return false;
@@ -215,7 +218,7 @@ namespace mongo {
log(2) << "\t cloner got " << collection << endl;
- BSONElement e = collection.findElement("name");
+ BSONElement e = collection.getField("name");
if ( e.eoo() ) {
string s = "bad system.namespaces object " + collection.toString();
massert( 10290 , s.c_str(), false);
@@ -231,12 +234,11 @@ namespace mongo {
continue;
}
}
- else if( strchr(from_name, '$') ) {
+ if( strchr(from_name, '$') ) {
// don't clone index namespaces -- we take care of those separately below.
log(2) << "\t\t not cloning because has $ " << endl;
continue;
}
-
toClone.push_back( collection.getOwned() );
}
}
@@ -414,6 +416,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream &help ) const {
help << "clone this database from an instance of the db on another host\n";
help << "example: { clone : \"host13\" }";
@@ -436,6 +439,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdCloneCollection() : Command("cloneCollection") { }
virtual void help( stringstream &help ) const {
help << " example: { cloneCollection: <collection ns>, from: <hostname>, query: <query> }";
@@ -462,7 +466,7 @@ namespace mongo {
/* replication note: we must logOp() not the command, but the cloned data -- if the slave
were to clone it would get a different point-in-time and not match.
*/
- setClient( collection.c_str() );
+ Client::Context ctx( collection );
log() << "cloneCollection. db:" << ns << " collection:" << collection << " from: " << fromhost << " query: " << query << " logSizeMb: " << logSizeMb << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
@@ -479,6 +483,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdStartCloneCollection() : Command("startCloneCollection") { }
virtual void help( stringstream &help ) const {
help << " example: { startCloneCollection: <collection ns>, from: <hostname>, query: <query> }";
@@ -506,7 +511,7 @@ namespace mongo {
/* replication note: we must logOp() not the command, but the cloned data -- if the slave
were to clone it would get a different point-in-time and not match.
*/
- setClient( collection.c_str() );
+ Client::Context ctx(collection);
log() << "startCloneCollection. db:" << ns << " collection:" << collection << " from: " << fromhost << " query: " << query << endl;
@@ -532,6 +537,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdFinishCloneCollection() : Command("finishCloneCollection") { }
virtual void help( stringstream &help ) const {
help << " example: { finishCloneCollection: <finishToken> }";
@@ -562,7 +568,7 @@ namespace mongo {
cursorId = cursorIdToken._numberLong();
}
- setClient( collection.c_str() );
+ Client::Context ctx( collection );
log() << "finishCloneCollection. db:" << ns << " collection:" << collection << " from: " << fromhost << " query: " << query << endl;
@@ -571,8 +577,50 @@ namespace mongo {
}
} cmdfinishclonecollection;
+ thread_specific_ptr< DBClientConnection > authConn_;
+ /* Usage:
+ admindb.$cmd.findOne( { copydbgetnonce: 1, fromhost: <hostname> } );
+ */
+ class CmdCopyDbGetNonce : public Command {
+ public:
+ CmdCopyDbGetNonce() : Command("copydbgetnonce") { }
+ virtual bool adminOnly() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual LockType locktype(){ return WRITE; }
+ virtual void help( stringstream &help ) const {
+ help << "get a nonce for subsequent copy db request from secure server\n";
+ help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
+ }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string fromhost = cmdObj.getStringField("fromhost");
+ if ( fromhost.empty() ) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << cmdLine.port;
+ fromhost = ss.str();
+ }
+ authConn_.reset( new DBClientConnection() );
+ BSONObj ret;
+ {
+ dbtemprelease t;
+ if ( !authConn_->connect( fromhost, errmsg ) )
+ return false;
+ if( !authConn_->runCommand( "admin", BSON( "getnonce" << 1 ), ret ) ) {
+ errmsg = "couldn't get nonce " + string( ret );
+ return false;
+ }
+ }
+ result.appendElements( ret );
+ return true;
+ }
+ } cmdcopydbgetnonce;
+
/* Usage:
- admindb.$cmd.findOne( { copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db> } );
+ admindb.$cmd.findOne( { copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>[, username: <username>, nonce: <nonce>, key: <key>] } );
*/
class CmdCopyDb : public Command {
public:
@@ -583,9 +631,10 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream &help ) const {
- help << "copy a database from antoher host to this host\n";
- help << "usage: {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>}";
+ help << "copy a database from another host to this host\n";
+ help << "usage: {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>[, username: <username>, nonce: <nonce>, key: <key>]}";
}
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string fromhost = cmdObj.getStringField("fromhost");
@@ -601,9 +650,24 @@ namespace mongo {
errmsg = "parms missing - {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>}";
return false;
}
- setClient(todb.c_str());
- bool res = cloneFrom(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, /*slaveok*/false, /*replauth*/false, /*snapshot*/true);
- cc().clearns();
+ Cloner c;
+ string username = cmdObj.getStringField( "username" );
+ string nonce = cmdObj.getStringField( "nonce" );
+ string key = cmdObj.getStringField( "key" );
+ if ( !username.empty() && !nonce.empty() && !key.empty() ) {
+ uassert( 13008, "must call copydbgetnonce first", authConn_.get() );
+ BSONObj ret;
+ {
+ dbtemprelease t;
+ if ( !authConn_->runCommand( fromdb, BSON( "authenticate" << 1 << "user" << username << "nonce" << nonce << "key" << key ), ret ) ) {
+ errmsg = "unable to login " + string( ret );
+ return false;
+ }
+ }
+ c.setConnection( authConn_.release() );
+ }
+ Client::Context ctx(todb);
+ bool res = c.go(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, /*slaveok*/false, /*replauth*/false, /*snapshot*/true);
return res;
}
} cmdcopydb;
@@ -617,6 +681,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual bool logTheOp() {
return true; // can't log steps when doing fast rename within a db, so always log the op rather than individual steps comprising it.
}
@@ -631,16 +696,19 @@ namespace mongo {
return false;
}
- setClient( source.c_str() );
- NamespaceDetails *nsd = nsdetails( source.c_str() );
- uassert( 10026 , "source namespace does not exist", nsd );
- bool capped = nsd->capped;
+ bool capped = false;
long long size = 0;
- if ( capped )
- for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
- size += i.ext()->length;
+ {
+ Client::Context ctx( source );
+ NamespaceDetails *nsd = nsdetails( source.c_str() );
+ uassert( 10026 , "source namespace does not exist", nsd );
+ capped = nsd->capped;
+ if ( capped )
+ for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ size += i.ext()->length;
+ }
- setClient( target.c_str() );
+ Client::Context ctx( target );
if ( nsdetails( target.c_str() ) ){
uassert( 10027 , "target namespace exists", cmdObj["dropTarget"].trueValue() );
@@ -715,8 +783,10 @@ namespace mongo {
theDataFileMgr.insert( targetIndexes.c_str(), n );
}
- setClient( source.c_str() );
- dropCollection( source, errmsg, result );
+ {
+ Client::Context ctx( source );
+ dropCollection( source, errmsg, result );
+ }
return true;
}
} cmdrenamecollection;
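
The two new commands above form a handshake. A hedged client-side sketch of driving it; hashPassword() is a hypothetical helper for the MD5 key the server expects, not a real driver call:

    #include "client/dbclient.h"

    bool authedCopyDb(mongo::DBClientConnection& target,
                      const std::string& fromhost, const std::string& fromdb,
                      const std::string& todb,
                      const std::string& user, const std::string& pwd) {
        mongo::BSONObj nonceRes;
        // step 1: have the target fetch a nonce from the source (caches authConn_)
        if (!target.runCommand("admin",
                BSON("copydbgetnonce" << 1 << "fromhost" << fromhost), nonceRes))
            return false;
        std::string nonce = nonceRes.getStringField("nonce");

        // step 2: send copydb with the key derived from nonce + user + password
        std::string key = hashPassword(nonce, user, pwd);  // hypothetical helper
        mongo::BSONObj res;
        return target.runCommand("admin",
            BSON("copydb" << 1 << "fromhost" << fromhost << "fromdb" << fromdb
                 << "todb" << todb << "username" << user
                 << "nonce" << nonce << "key" << key), res);
    }
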
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
new file mode 100644
index 0000000..59eafdd
--- /dev/null
+++ b/db/cmdline.cpp
@@ -0,0 +1,162 @@
+// cmdline.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "stdafx.h"
+#include "cmdline.h"
+#include "commands.h"
+
+namespace po = boost::program_options;
+
+namespace mongo {
+ CmdLine cmdLine;
+
+ void setupSignals();
+ BSONArray argvArray;
+
+ void CmdLine::addGlobalOptions( boost::program_options::options_description& general ,
+ boost::program_options::options_description& hidden ){
+ /* support for -vv -vvvv etc. */
+ for (string s = "vv"; s.length() <= 12; s.append("v")) {
+ hidden.add_options()(s.c_str(), "verbose");
+ }
+
+ general.add_options()
+ ("help,h", "show this usage information")
+ ("version", "show version information")
+ ("config,f", po::value<string>(), "configuration file specifying additional options")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("quiet", "quieter output")
+ ("port", po::value<int>(&cmdLine.port), "specify port number")
+ ("logpath", po::value<string>() , "file to send all output to instead of stdout" )
+ ("logappend" , "append to logpath instead of over-writing" )
+#ifndef _WIN32
+ ("fork" , "fork server process" )
+#endif
+ ;
+
+ }
+
+
+ bool CmdLine::store( int argc , char ** argv ,
+ boost::program_options::options_description& visible,
+ boost::program_options::options_description& hidden,
+ boost::program_options::positional_options_description& positional,
+ boost::program_options::variables_map &params ){
+
+
+        /* don't allow guessing - it creates ambiguities when some options are
+         * prefixes of others. allow long disguises, and disable guessing and
+         * sticky short options so our vvvvvvv trick still works. */
+ int style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+
+
+ try {
+
+ po::options_description all;
+ all.add( visible );
+ all.add( hidden );
+
+ po::store( po::command_line_parser(argc, argv)
+ .options( all )
+ .positional( positional )
+ .style( style )
+ .run(),
+ params );
+
+ if ( params.count("config") ){
+ ifstream f( params["config"].as<string>().c_str() );
+ if ( ! f.is_open() ){
+ cout << "ERROR: could not read from config file" << endl << endl;
+ cout << visible << endl;
+ return false;
+ }
+
+ po::store( po::parse_config_file( f , all ) , params );
+ f.close();
+ }
+
+ po::notify(params);
+ }
+ catch (po::error &e) {
+ cout << "ERROR: " << e.what() << endl << endl;
+ cout << visible << endl;
+ return false;
+ }
+
+ if (params.count("verbose")) {
+ logLevel = 1;
+ }
+
+ for (string s = "vv"; s.length() <= 12; s.append("v")) {
+ if (params.count(s)) {
+ logLevel = s.length();
+ }
+ }
+
+ if (params.count("quiet")) {
+ cmdLine.quiet = true;
+ }
+
+#ifndef _WIN32
+ if (params.count("fork")) {
+ if ( ! params.count( "logpath" ) ){
+ cout << "--fork has to be used with --logpath" << endl;
+ ::exit(-1);
+ }
+ pid_t c = fork();
+ if ( c ){
+ cout << "forked process: " << c << endl;
+ ::exit(0);
+ }
+ setsid();
+ setupSignals();
+ }
+#endif
+ if (params.count("logpath")) {
+ string lp = params["logpath"].as<string>();
+ uassert( 10033 , "logpath has to be non-zero" , lp.size() );
+ initLogging( lp , params.count( "logappend" ) );
+ }
+
+ {
+ BSONArrayBuilder b;
+ for (int i=0; i < argc; i++)
+ b << argv[i];
+ argvArray = b.arr();
+ }
+
+ return true;
+ }
+
+    class CmdGetCmdLineOpts : public Command {
+ public:
+ CmdGetCmdLineOpts(): Command("getCmdLineOpts") {}
+ virtual LockType locktype() { return NONE; }
+ virtual bool adminOnly() { return true; }
+ virtual bool slaveOk() { return true; }
+
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ result.append("argv", argvArray);
+ return true;
+ }
+
+ } cmdGetCmdLineOpts;
+}
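
A sketch of how a server main() is expected to wire the two static helpers above; the option descriptions are trimmed and the usage text is illustrative:

    #include <cstdlib>
    #include <iostream>
    #include "db/cmdline.h"

    namespace po = boost::program_options;

    int main(int argc, char* argv[]) {
        po::options_description visible("Allowed options");
        po::options_description hidden("Hidden options");
        po::positional_options_description positional;
        po::variables_map params;

        // adds --help, --port, --logpath, and the hidden -vv..-vvvvvvvvvvvv options
        mongo::CmdLine::addGlobalOptions(visible, hidden);

        if (!mongo::CmdLine::store(argc, argv, visible, hidden, positional, params))
            return EXIT_FAILURE;          // bad flags or unreadable --config file

        if (params.count("help")) {
            std::cout << visible << std::endl;
            return EXIT_SUCCESS;
        }
        // ... start serving on mongo::cmdLine.port ...
        return EXIT_SUCCESS;
    }
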
diff --git a/db/cmdline.h b/db/cmdline.h
index b071259..3e46c5e 100644
--- a/db/cmdline.h
+++ b/db/cmdline.h
@@ -16,6 +16,8 @@
#pragma once
+#include "../stdafx.h"
+
namespace mongo {
/* command line options
@@ -23,6 +25,7 @@ namespace mongo {
/* concurrency: OK/READ */
struct CmdLine {
int port; // --port
+ bool rest; // --rest
string source; // --source
string only; // --only
@@ -47,11 +50,25 @@ namespace mongo {
};
CmdLine() :
- port(DefaultDBPort), quiet(false), notablescan(false), prealloc(true), smallfiles(false),
+ port(DefaultDBPort), rest(false), quiet(false), notablescan(false), prealloc(true), smallfiles(false),
quota(false), quotaFiles(8), cpu(false), oplogSize(0), defaultProfile(0), slowMS(100)
{ }
+
- };
+ static void addGlobalOptions( boost::program_options::options_description& general ,
+ boost::program_options::options_description& hidden );
+
+ /**
+ * @return true if should run program, false if should exit
+ */
+ static bool store( int argc , char ** argv ,
+ boost::program_options::options_description& visible,
+ boost::program_options::options_description& hidden,
+ boost::program_options::positional_options_description& positional,
+ boost::program_options::variables_map &output );
+ };
+
extern CmdLine cmdLine;
+
}
diff --git a/db/commands.cpp b/db/commands.cpp
index 3078ea1..83d7219 100644
--- a/db/commands.cpp
+++ b/db/commands.cpp
@@ -20,6 +20,8 @@
#include "stdafx.h"
#include "jsobj.h"
#include "commands.h"
+#include "client.h"
+#include "replset.h"
namespace mongo {
@@ -72,9 +74,14 @@ namespace mongo {
ok = c->run(ns, jsobj, errmsg, anObjBuilder, false);
}
- anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
+ BSONObj tmp = anObjBuilder.asTempObj();
+ bool have_ok = tmp.hasField("ok");
+ bool have_errmsg = tmp.hasField("errmsg");
+
+ if (!have_ok)
+ anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
- if ( !ok ) {
+ if ( !ok && !have_errmsg) {
anObjBuilder.append("errmsg", errmsg);
uassert_nothrow(errmsg.c_str());
}
@@ -92,11 +99,12 @@ namespace mongo {
}
- bool Command::readOnly( const string& name ){
+ Command::LockType Command::locktype( const string& name ){
Command * c = findCommand( name );
if ( ! c )
- return false;
- return c->readOnly();
+ return WRITE;
+ return c->locktype();
}
+
} // namespace mongo
diff --git a/db/commands.h b/db/commands.h
index 20fb98c..518dcb7 100644
--- a/db/commands.h
+++ b/db/commands.h
@@ -25,11 +25,15 @@ namespace mongo {
class BSONObj;
class BSONObjBuilder;
class BufBuilder;
-
+ class Client;
+
// db "commands" (sent via db.$cmd.findOne(...))
// subclass to make a command.
class Command {
public:
+
+ enum LockType { READ = -1 , NONE = 0 , WRITE = 1 };
+
string name;
/* run the given command
@@ -42,12 +46,12 @@ namespace mongo {
*/
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) = 0;
- /* true if a read lock is sufficient
- note: logTheTop() MUST be false if readOnly
+ /*
+ note: logTheTop() MUST be false if READ
+           if NONE, the Client::Context setup can't be used; use with caution
*/
- virtual bool readOnly() {
- return false;
- }
+ virtual LockType locktype() = 0;
/* Return true if only the admin ns has privileges to run this command. */
virtual bool adminOnly() {
@@ -105,10 +109,11 @@ namespace mongo {
public:
static bool runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder);
- static bool readOnly( const string& name );
+ static LockType locktype( const string& name );
static Command * findCommand( const string& name );
};
bool _runCommands(const char *ns, BSONObj& jsobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions);
+
} // namespace mongo
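
Under the new contract, every Command subclass must declare its lock type. A minimal sketch of a compliant command; the name "examplePing" is illustrative and registration happens via the base constructor:

    namespace mongo {
        class CmdExamplePing : public Command {
        public:
            CmdExamplePing() : Command("examplePing") {}
            virtual bool slaveOk() { return true; }
            // NONE: no db lock is taken, so no Client::Context is set up for us
            virtual LockType locktype() { return NONE; }
            virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg,
                             BSONObjBuilder& result, bool fromRepl) {
                return true;   // dispatcher appends {ok: 1.0} when absent
            }
        } cmdExamplePing;
    }
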
diff --git a/db/common.cpp b/db/common.cpp
new file mode 100644
index 0000000..a199bd1
--- /dev/null
+++ b/db/common.cpp
@@ -0,0 +1,14 @@
+// common.cpp
+
+#include "stdafx.h"
+#include "concurrency.h"
+
+/**
+ * this just has globals
+ */
+namespace mongo {
+
+    /* we use new here so we don't have to worry about destruction order at program shutdown */
+ MongoMutex &dbMutex( *(new MongoMutex) );
+
+}
diff --git a/db/concurrency.h b/db/concurrency.h
index daf09b6..de8f242 100644
--- a/db/concurrency.h
+++ b/db/concurrency.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
/* concurrency.h
mongod concurrency rules & notes will be placed here.
@@ -17,19 +33,34 @@
#include <boost/thread/shared_mutex.hpp>
#undef assert
#define assert xassert
+#define HAVE_READLOCK
#else
-#warning built with boost version 1.34 or older limited concurrency
+#warning built with boost version 1.34 or older - limited concurrency
#endif
namespace mongo {
+ inline bool readLockSupported(){
+#ifdef HAVE_READLOCK
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ string sayClientState();
+ bool haveClient();
+
+ void curopWaitingForLock( int type );
+ void curopGotLock();
+
/* mutex time stats */
class MutexInfo {
unsigned long long start, enter, timeLocked; // all in microseconds
int locked;
public:
- MutexInfo() : locked(0) {
+ MutexInfo() : timeLocked(0) , locked(0) {
start = curTimeMicros64();
}
void entered() {
@@ -51,9 +82,12 @@ namespace mongo {
s = start;
tl = timeLocked;
}
+ unsigned long long getTimeLocked() const {
+ return timeLocked;
+ }
};
-#if BOOST_VERSION >= 103500
+#ifdef HAVE_READLOCK
//#if 0
class MongoMutex {
MutexInfo _minfo;
@@ -80,19 +114,25 @@ namespace mongo {
void assertAtLeastReadLocked() { assert(atLeastReadLocked()); }
void lock() {
- DEV cout << "LOCK" << endl;
+ //DEV cout << "LOCK" << endl;
+ DEV assert( haveClient() );
+
int s = _state.get();
if( s > 0 ) {
_state.set(s+1);
return;
}
- massert( 10293 , "internal error: locks are not upgradeable", s == 0 );
+ massert( 10293 , (string)"internal error: locks are not upgradeable: " + sayClientState() , s == 0 );
_state.set(1);
+
+ curopWaitingForLock( 1 );
_m.lock();
+ curopGotLock();
+
_minfo.entered();
}
void unlock() {
- DEV cout << "UNLOCK" << endl;
+ //DEV cout << "UNLOCK" << endl;
int s = _state.get();
if( s > 1 ) {
_state.set(s-1);
@@ -103,7 +143,7 @@ namespace mongo {
_releasedEarly.set(false);
return;
}
- assert(false); // attempt to unlock when wasn't in a write lock
+ massert( 12599, "internal error: attempt to unlock when wasn't in a write lock", false);
}
_state.set(0);
_minfo.leaving();
@@ -121,7 +161,7 @@ namespace mongo {
}
void lock_shared() {
- DEV cout << " LOCKSHARED" << endl;
+ //DEV cout << " LOCKSHARED" << endl;
int s = _state.get();
if( s ) {
if( s > 0 ) {
@@ -136,10 +176,29 @@ namespace mongo {
}
}
_state.set(-1);
+ curopWaitingForLock( -1 );
_m.lock_shared();
+ curopGotLock();
+ }
+
+ bool lock_shared_try( int millis ) {
+ int s = _state.get();
+ if ( s ){
+ // we already have a lock, so no need to try
+ lock_shared();
+ return true;
+ }
+
+ boost::system_time until = get_system_time();
+            until += boost::posix_time::milliseconds(millis);
+ bool got = _m.timed_lock_shared( until );
+ if ( got )
+ _state.set(-1);
+ return got;
}
+
void unlock_shared() {
- DEV cout << " UNLOCKSHARED" << endl;
+ //DEV cout << " UNLOCKSHARED" << endl;
int s = _state.get();
if( s > 0 ) {
assert( s > 1 ); /* we must have done a lock write first to have s > 1 */
@@ -154,6 +213,7 @@ namespace mongo {
_state.set(0);
_m.unlock_shared();
}
+
MutexInfo& info() { return _minfo; }
};
#else
@@ -165,7 +225,7 @@ namespace mongo {
public:
MongoMutex() { }
void lock() {
-#if BOOST_VERSION >= 103500
+#ifdef HAVE_READLOCK
m.lock();
#else
boost::detail::thread::lock_ops<boost::recursive_mutex>::lock(m);
@@ -182,7 +242,7 @@ namespace mongo {
void _unlock() {
_minfo.leaving();
-#if BOOST_VERSION >= 103500
+#ifdef HAVE_READLOCK
m.unlock();
#else
boost::detail::thread::lock_ops<boost::recursive_mutex>::unlock(m);
@@ -197,6 +257,18 @@ namespace mongo {
}
void lock_shared() { lock(); }
+ bool lock_shared_try( int millis ) {
+ while ( millis-- ){
+ if ( getState() ){
+ sleepmillis(1);
+ continue;
+ }
+ lock_shared();
+ return true;
+ }
+ return false;
+ }
+
void unlock_shared() { unlock(); }
MutexInfo& info() { return _minfo; }
void assertWriteLocked() {
@@ -220,8 +292,10 @@ namespace mongo {
dbMutex.lock();
}
~writelock() {
- dbunlocking_write();
- dbMutex.unlock();
+ DESTRUCTOR_GUARD(
+ dbunlocking_write();
+ dbMutex.unlock();
+ );
}
};
@@ -230,11 +304,43 @@ namespace mongo {
dbMutex.lock_shared();
}
~readlock() {
- dbunlocking_read();
- dbMutex.unlock_shared();
+ DESTRUCTOR_GUARD(
+ dbunlocking_read();
+ dbMutex.unlock_shared();
+ );
}
+ };
+
+ struct readlocktry {
+ readlocktry( const string&ns , int tryms ){
+ _got = dbMutex.lock_shared_try( tryms );
+ }
+ ~readlocktry() {
+ if ( _got ){
+ dbunlocking_read();
+ dbMutex.unlock_shared();
+ }
+ }
+ bool got(){
+ return _got;
+ }
+ bool _got;
};
+ struct atleastreadlock {
+ atleastreadlock( const string& ns ){
+ _prev = dbMutex.getState();
+ if ( _prev == 0 )
+ dbMutex.lock_shared();
+ }
+ ~atleastreadlock(){
+ if ( _prev == 0 )
+ dbMutex.unlock_shared();
+ }
+
+ int _prev;
+ };
+
class mongolock {
bool _writelock;
public:
@@ -246,14 +352,15 @@ namespace mongo {
dbMutex.lock_shared();
}
~mongolock() {
- if( _writelock ) {
- dbunlocking_write();
- dbMutex.unlock();
- }
- else {
- dbunlocking_read();
- dbMutex.unlock_shared();
- }
+ DESTRUCTOR_GUARD(
+ if( _writelock ) {
+ dbunlocking_write();
+ dbMutex.unlock();
+ } else {
+ dbunlocking_read();
+ dbMutex.unlock_shared();
+ }
+ );
}
/* this unlocks, does NOT upgrade. that works for our current usage */
void releaseAndWriteLock();
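
A short usage sketch for the new non-blocking guard above; the function body is illustrative:

    void tryServeStatusPage() {
        mongo::readlocktry lk("admin", /*tryms*/ 300);
        if (!lk.got()) {
            // couldn't acquire a read lock quickly; bail rather than block
            return;
        }
        // read lock held here; released by ~readlocktry
    }
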
diff --git a/db/curop.h b/db/curop.h
index 8a28f4f..e5d38d7 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -1,10 +1,27 @@
// curop.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#pragma once
#include "namespace.h"
-#include "security.h"
#include "client.h"
+#include "../util/atomic_int.h"
+#include "db.h"
namespace mongo {
@@ -20,19 +37,53 @@ namespace mongo {
/* Current operation (for the current Client).
       An embedded member of the Client class, typically used from within the mutex there. */
class CurOp : boost::noncopyable {
- static WrappingInt _nextOpNum;
+ static AtomicUInt _nextOpNum;
static BSONObj _tooBig; // { $msg : "query not recording (too large)" }
+
+ Client * _client;
+ CurOp * _wrapped;
+
+ unsigned long long _start;
+ unsigned long long _checkpoint;
+ unsigned long long _end;
bool _active;
- Timer _timer;
int _op;
- WrappingInt _opNum;
+ bool _command;
+ int _lockType; // see concurrency.h for values
+ bool _waitingForLock;
+ int _dbprofile; // 0=off, 1=slow, 2=all
+ AtomicUInt _opNum;
char _ns[Namespace::MaxNsLen+2];
- struct sockaddr_in client;
-
+ struct sockaddr_in _remote;
+
char _queryBuf[256];
- bool haveQuery() const { return *((int *) _queryBuf) != 0; }
+
void resetQuery(int x=0) { *((int *)_queryBuf) = x; }
+
+ OpDebug _debug;
+
+ ThreadSafeString _message;
+ ProgressMeter _progressMeter;
+
+ void _reset(){
+ _command = false;
+ _lockType = 0;
+ _dbprofile = 0;
+ _end = 0;
+ _waitingForLock = false;
+ _message = "";
+ _progressMeter.finished();
+ }
+
+ void setNS(const char *ns) {
+ strncpy(_ns, ns, Namespace::MaxNsLen);
+ }
+
+ public:
+
+ bool haveQuery() const { return *((int *) _queryBuf) != 0; }
+
BSONObj query() {
if( *((int *) _queryBuf) == 1 ) {
return _tooBig;
@@ -41,37 +92,108 @@ namespace mongo {
return o;
}
- OpDebug _debug;
- public:
- void reset( const sockaddr_in &_client) {
+ void ensureStarted(){
+ if ( _start == 0 )
+ _start = _checkpoint = curTimeMicros64();
+ }
+ void enter( Client::Context * context ){
+ ensureStarted();
+ setNS( context->ns() );
+ if ( context->_db && context->_db->profile > _dbprofile )
+ _dbprofile = context->_db->profile;
+ }
+
+ void leave( Client::Context * context ){
+ unsigned long long now = curTimeMicros64();
+ Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
+ _checkpoint = now;
+ }
+
+ void reset( const sockaddr_in & remote, int op ) {
+ _reset();
+ _start = _checkpoint = 0;
_active = true;
- _opNum = _nextOpNum.atomicIncrement();
- _timer.reset();
+ _opNum = _nextOpNum++;
_ns[0] = '?'; // just in case not set later
_debug.reset();
resetQuery();
- client = _client;
+ _remote = remote;
+ _op = op;
+ }
+
+ void markCommand(){
+ _command = true;
+ }
+
+ void waitingForLock( int type ){
+ _waitingForLock = true;
+ if ( type > 0 )
+ _lockType = 1;
+ else
+ _lockType = -1;
+ }
+ void gotLock(){
+ _waitingForLock = false;
}
OpDebug& debug(){
return _debug;
}
+
+ int profileLevel() const {
+ return _dbprofile;
+ }
- WrappingInt opNum() const { return _opNum; }
- bool active() const { return _active; }
+ const char * getNS() const {
+ return _ns;
+ }
- int elapsedMillis(){ return _timer.millis(); }
+ bool shouldDBProfile( int ms ) const {
+ if ( _dbprofile <= 0 )
+ return false;
+
+ return _dbprofile >= 2 || ms >= cmdLine.slowMS;
+ }
+
+ AtomicUInt opNum() const { return _opNum; }
+
+ /** if this op is running */
+ bool active() const { return _active; }
+
+ int getLockType() const { return _lockType; }
+ bool isWaitingForLock() const { return _waitingForLock; }
+ int getOp() const { return _op; }
+
/** micros */
- unsigned long long startTime(){
- return _timer.startTime();
+ unsigned long long startTime() {
+ ensureStarted();
+ return _start;
}
- void setActive(bool active) { _active = active; }
- void setNS(const char *ns) {
- strncpy(_ns, ns, Namespace::MaxNsLen);
+ void done() {
+ _active = false;
+ _end = curTimeMicros64();
+ }
+
+ unsigned long long totalTimeMicros() {
+ massert( 12601 , "CurOp not marked done yet" , ! _active );
+ return _end - startTime();
+ }
+
+ int totalTimeMillis() {
+ return (int) (totalTimeMicros() / 1000);
}
- void setOp(int op) { _op = op; }
+
+ int elapsedMillis() {
+ unsigned long long total = curTimeMicros64() - startTime();
+ return (int) (total / 1000);
+ }
+
+ int elapsedSeconds() {
+ return elapsedMillis() / 1000;
+ }
+
void setQuery(const BSONObj& query) {
if( query.objsize() > (int) sizeof(_queryBuf) ) {
resetQuery(1); // flag as too big and return
@@ -80,9 +202,15 @@ namespace mongo {
memcpy(_queryBuf, query.objdata(), query.objsize());
}
- CurOp() {
+ CurOp( Client * client , CurOp * wrapped = 0 ) {
+ _client = client;
+ _wrapped = wrapped;
+ if ( _wrapped ){
+ _client->_curOp = this;
+ }
+ _start = _checkpoint = 0;
_active = false;
-// opNum = 0;
+ _reset();
_op = 0;
// These addresses should never be written to again. The zeroes are
// placed here as a precaution because currentOp may be accessed
@@ -90,10 +218,14 @@ namespace mongo {
memset(_ns, 0, sizeof(_ns));
memset(_queryBuf, 0, sizeof(_queryBuf));
}
+
+ ~CurOp(){
+ if ( _wrapped )
+ _client->_curOp = _wrapped;
+ }
BSONObj info() {
- AuthenticationInfo *ai = currentClient.get()->ai;
- if( !ai->isAuthorized("admin") ) {
+ if( ! cc().getAuthenticationInfo()->isAuthorized("admin") ) {
BSONObjBuilder b;
b.append("err", "unauthorized");
return b.obj();
@@ -101,35 +233,30 @@ namespace mongo {
return infoNoauth();
}
- BSONObj infoNoauth() {
- BSONObjBuilder b;
- b.append("opid", _opNum);
- b.append("active", _active);
- if( _active )
- b.append("secs_running", _timer.seconds() );
- if( _op == 2004 )
- b.append("op", "query");
- else if( _op == 2005 )
- b.append("op", "getMore");
- else if( _op == 2001 )
- b.append("op", "update");
- else if( _op == 2002 )
- b.append("op", "insert");
- else if( _op == 2006 )
- b.append("op", "delete");
- else
- b.append("op", _op);
- b.append("ns", _ns);
+ BSONObj infoNoauth();
+
+ string getRemoteString(){
+ stringstream ss;
+ ss << inet_ntoa( _remote.sin_addr ) << ":" << ntohs( _remote.sin_port );
+ return ss.str();
+ }
- if( haveQuery() ) {
- b.append("query", query());
+ ProgressMeter& setMessage( const char * msg , long long progressMeterTotal = 0 , int secondsBetween = 3 ){
+ _message = msg;
+ if ( progressMeterTotal ){
+ assert( ! _progressMeter.isActive() );
+ _progressMeter.reset( progressMeterTotal , secondsBetween );
+ }
+ else {
+ _progressMeter.finished();
}
- // b.append("inLock", ??
- stringstream clientStr;
- clientStr << inet_ntoa( client.sin_addr ) << ":" << ntohs( client.sin_port );
- b.append("client", clientStr.str());
- return b.obj();
+ return _progressMeter;
}
+
+ string getMessage() const { return _message; }
+ ProgressMeter getProgressMeter() { return _progressMeter; }
+
+ friend class Client;
};
/* 0 = ok
@@ -137,12 +264,12 @@ namespace mongo {
future: maybe use this as a "going away" thing on process termination with a higher flag value
*/
extern class KillCurrentOp {
- enum { Off, On, All } state;
- WrappingInt toKill;
+ enum { Off, On, All } state;
+ AtomicUInt toKill;
public:
void killAll() { state = All; }
- void kill(WrappingInt i) { toKill = i; state = On; }
-
+ void kill(AtomicUInt i) { toKill = i; state = On; }
+
void checkForInterrupt() {
if( state != Off ) {
if( state == All )
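
The reworked CurOp above gains a _client/_wrapped pair: the constructor installs the new op as the client's current op and the destructor restores the one it interrupted, giving nested operations (for example a command issued while servicing another op) a stack discipline. A minimal sketch with illustrative names, not the real mongo types:

    #include <cassert>

    // Sketch of the nesting discipline: ctor pushes the op onto the client,
    // dtor pops back to the op it wrapped.
    struct ClientSketch;
    struct CurOpSketch {
        ClientSketch* _client;
        CurOpSketch*  _wrapped;
        CurOpSketch( ClientSketch* c, CurOpSketch* w );
        ~CurOpSketch();
    };
    struct ClientSketch {
        CurOpSketch* _curOp;
        ClientSketch() : _curOp( 0 ) {}
    };
    CurOpSketch::CurOpSketch( ClientSketch* c, CurOpSketch* w )
            : _client( c ), _wrapped( w ) {
        if ( _wrapped )
            _client->_curOp = this;       // become the current (innermost) op
    }
    CurOpSketch::~CurOpSketch() {
        if ( _wrapped )
            _client->_curOp = _wrapped;   // restore the op we interrupted
    }

    int main() {
        ClientSketch c;
        CurOpSketch outer( &c, 0 );       // first op is owned by the client
        c._curOp = &outer;
        {
            CurOpSketch inner( &c, c._curOp );  // e.g. a nested command
            assert( c._curOp == &inner );
        }
        assert( c._curOp == &outer );     // dtor restored the outer op
    }
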
diff --git a/db/cursor.h b/db/cursor.h
index 3868cca..69e5d67 100644
--- a/db/cursor.h
+++ b/db/cursor.h
@@ -19,7 +19,7 @@
#include "../stdafx.h"
#include "jsobj.h"
-#include "storage.h"
+#include "diskloc.h"
namespace mongo {
@@ -76,6 +76,8 @@ namespace mongo {
/* called before query getmore block is iterated */
virtual void checkLocation() { }
+
+ virtual bool supportGetMore() = 0;
virtual string toString() {
return "abstract?";
@@ -91,10 +93,10 @@ namespace mongo {
*/
virtual bool getsetdup(DiskLoc loc) = 0;
- virtual BSONObj prettyStartKey() const { return BSONObj(); }
- virtual BSONObj prettyEndKey() const { return BSONObj(); }
+ virtual BSONObj prettyIndexBounds() const { return BSONObj(); }
virtual bool capped() const { return false; }
+
};
// strategy object implementing direction of traversal.
@@ -157,6 +159,8 @@ namespace mongo {
return tailable_;
}
virtual bool getsetdup(DiskLoc loc) { return false; }
+
+ virtual bool supportGetMore() { return true; }
};
/* used for order { $natural: -1 } */
diff --git a/db/database.h b/db/database.h
index 0fcf386..868af0b 100644
--- a/db/database.h
+++ b/db/database.h
@@ -36,7 +36,7 @@ namespace mongo {
: name(nm), path(_path), namespaceIndex( path, name ) {
{ // check db name is valid
- int L = strlen(nm);
+ size_t L = strlen(nm);
uassert( 10028 , "db name is empty", L > 0 );
uassert( 10029 , "bad db name [1]", *nm != '.' );
uassert( 10030 , "bad db name [2]", nm[L-1] != '.' );
@@ -63,8 +63,8 @@ namespace mongo {
~Database() {
magic = 0;
btreeStore->closeFiles(name, path);
- int n = files.size();
- for ( int i = 0; i < n; i++ )
+ size_t n = files.size();
+ for ( size_t i = 0; i < n; i++ )
delete files[i];
}
@@ -79,12 +79,19 @@ namespace mongo {
return ! namespaceIndex.allocated();
}
- bool exists(int n) {
+ boost::filesystem::path fileName( int n ) {
stringstream ss;
ss << name << '.' << n;
boost::filesystem::path fullName;
- fullName = boost::filesystem::path(path) / ss.str();
- return boost::filesystem::exists(fullName);
+ fullName = boost::filesystem::path(path);
+ if ( directoryperdb )
+ fullName /= name;
+ fullName /= ss.str();
+ return fullName;
+ }
+
+ bool exists(int n) {
+ return boost::filesystem::exists( fileName( n ) );
}
void openAllFiles() {
@@ -124,10 +131,7 @@ namespace mongo {
p = files[n];
}
if ( p == 0 ) {
- stringstream ss;
- ss << name << '.' << n;
- boost::filesystem::path fullName;
- fullName = boost::filesystem::path(path) / ss.str();
+ boost::filesystem::path fullName = fileName( n );
string fullNameString = fullName.string();
p = new MongoDataFile(n);
int minSize = 0;
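
The new fileName() helper centralizes datafile naming and is where the new --directoryperdb layout is honored: <dbpath>/<name>.<n> normally, <dbpath>/<name>/<name>.<n> when the option is set. A sketch of the same logic, using std::filesystem in place of boost::filesystem:

    #include <filesystem>
    #include <iostream>
    #include <sstream>

    // Sketch of fileName(): with directoryperdb, datafiles live in a
    // per-database subdirectory under the data root.
    std::filesystem::path dataFileName( const std::string& root,
                                        const std::string& dbname,
                                        int n, bool directoryperdb ) {
        std::ostringstream ss;
        ss << dbname << '.' << n;                 // e.g. "test.0", "test.1", ...
        std::filesystem::path fullName( root );
        if ( directoryperdb )
            fullName /= dbname;                   // <root>/test/test.0
        fullName /= ss.str();                     // <root>/test.0 otherwise
        return fullName;
    }

    int main() {
        std::cout << dataFileName( "/data/db", "test", 0, false ) << '\n'
                  << dataFileName( "/data/db", "test", 0, true )  << '\n';
    }
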
diff --git a/db/db.cpp b/db/db.cpp
index 9b1a22a..fe63df1 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -29,6 +29,7 @@
#include "instance.h"
#include "clientcursor.h"
#include "pdfile.h"
+#include "stats/counters.h"
#if !defined(_WIN32)
#include <sys/file.h>
#endif
@@ -40,6 +41,7 @@
#include "../scripting/engine.h"
#include "module.h"
#include "cmdline.h"
+#include "stats/snapshots.h"
namespace mongo {
@@ -54,10 +56,11 @@ namespace mongo {
extern string bind_ip;
extern char *appsrvPath;
- extern bool autoresync;
extern int diagLogging;
extern int lenForNewNsFiles;
extern int lockFile;
+
+ extern string repairpath;
void setupSignals();
void closeAllSockets();
@@ -65,9 +68,14 @@ namespace mongo {
void pairWith(const char *remoteEnd, const char *arb);
void setRecCacheSize(unsigned MB);
+ void exitCleanly( ExitCode code );
+
const char *ourgetns() {
Client *c = currentClient.get();
- return c ? c->ns() : "";
+ if ( ! c )
+ return "";
+ Client::Context* cc = c->getContext();
+ return cc ? cc->ns() : "";
}
struct MyStartupTests {
@@ -80,7 +88,7 @@ namespace mongo {
void testTheDb() {
OpDebug debug;
- setClient("sys.unittest.pdfile");
+ Client::Context ctx("sys.unittest.pdfile");
/* this is not validly formatted; if you query this namespace bad things will happen */
theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
@@ -99,8 +107,6 @@ namespace mongo {
c->advance();
}
out() << endl;
-
- cc().clearns();
}
MessagingPort *connGrab = 0;
@@ -137,13 +143,11 @@ namespace mongo {
};
void webServerThread();
- void pdfileInit();
void listen(int port) {
log() << mongodVersion() << endl;
printGitVersion();
printSysInfo();
- pdfileInit();
//testTheDb();
log() << "waiting for connections on port " << port << endl;
OurListener l(bind_ip, port);
@@ -193,7 +197,7 @@ namespace mongo {
try {
- c.ai->isLocalHost = dbMsgPort.farEnd.isLocalHost();
+ c.getAuthenticationInfo()->isLocalHost = dbMsgPort.farEnd.isLocalHost();
Message m;
while ( 1 ) {
@@ -206,6 +210,11 @@ namespace mongo {
break;
}
+ if ( inShutdown() ) {
+ log() << "got request after shutdown()" << endl;
+ break;
+ }
+
lastError.startRequest( m , le );
DbResponse dbresponse;
@@ -236,6 +245,9 @@ namespace mongo {
problem() << "SocketException in connThread, closing client connection" << endl;
dbMsgPort.shutdown();
}
+ catch ( const ClockSkewException & ) {
+ exitCleanly( EXIT_CLOCK_SKEW );
+ }
catch ( std::exception &e ) {
problem() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
dbexit( EXIT_UNCAUGHT );
@@ -263,8 +275,10 @@ namespace mongo {
// SockAddr db("172.16.0.179", MessagingPort::DBPort);
MessagingPort p;
- if ( !p.connect(db) )
+ if ( !p.connect(db) ){
+ out() << "msg couldn't connect" << endl;
return;
+ }
const int Loops = 1;
for ( int q = 0; q < Loops; q++ ) {
@@ -280,8 +294,9 @@ namespace mongo {
Timer t;
bool ok = p.call(send, response);
double tm = ((double) t.micros()) + 1;
- out() << " ****ok. response.data:" << ok << " time:" << tm / 1000.0 << "ms " <<
- ((double) len) * 8 / 1000000 / (tm/1000000) << "Mbps" << endl;
+ out() << " ****ok. response.data:" << ok << " time:" << tm / 1000.0 << "ms "
+ << "len: " << len << " data: " << response.data->_data << endl;
+
if ( q+1 < Loops ) {
out() << "\t\tSLEEP 8 then sending again as a test" << endl;
sleepsecs(8);
@@ -327,15 +342,22 @@ namespace mongo {
return repairDatabase( dbName.c_str(), errmsg );
}
+ extern bool checkNsFilesOnLoad;
+
void repairDatabases() {
+ Client::GodScope gs;
log(1) << "enter repairDatabases" << endl;
+
+ assert(checkNsFilesOnLoad);
+ checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
+
dblock lk;
vector< string > dbNames;
getDatabaseNames( dbNames );
for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
string dbName = *i;
log(1) << "\t" << dbName << endl;
- assert( !setClient( dbName.c_str() ) );
+ Client::Context ctx( dbName );
MongoDataFile *p = cc().database()->getFile( 0 );
MDFHeader *h = p->getHeader();
if ( !h->currentVersion() || forceRepair ) {
@@ -369,6 +391,8 @@ namespace mongo {
cc().shutdown();
dbexit( EXIT_CLEAN );
}
+
+ checkNsFilesOnLoad = true;
}
void clearTmpFiles() {
@@ -377,12 +401,13 @@ namespace mongo {
i != boost::filesystem::directory_iterator(); ++i ) {
string fileName = boost::filesystem::path(*i).leaf();
if ( boost::filesystem::is_directory( *i ) &&
- fileName.length() > 2 && fileName.substr( 0, 3 ) == "tmp" )
+ fileName.length() && fileName[ 0 ] == '$' )
boost::filesystem::remove_all( *i );
}
}
-
+
void clearTmpCollections() {
+ Client::GodScope gs;
vector< string > toDelete;
DBDirectClient cli;
auto_ptr< DBClientCursor > c = cli.query( "local.system.namespaces", Query( fromjson( "{name:/^local.temp./}" ) ) );
@@ -395,7 +420,7 @@ namespace mongo {
cli.dropCollection( *i );
}
}
-
+
/**
* does background async flushes of mmapped files
*/
@@ -403,15 +428,23 @@ namespace mongo {
public:
void run(){
log(1) << "will flush memory every: " << _sleepsecs << " seconds" << endl;
+ int time_flushing = 0;
while ( ! inShutdown() ){
if ( _sleepsecs == 0 ){
// in case at some point we add an option to change at runtime
sleepsecs(5);
continue;
}
- sleepmillis( (int)(_sleepsecs * 1000) );
- MemoryMappedFile::flushAll( false );
- log(1) << "flushing mmmap" << endl;
+
+ sleepmillis( (int)(std::max(0.0, (_sleepsecs * 1000) - time_flushing)) );
+
+ Date_t start = jsTime();
+ MemoryMappedFile::flushAll( true );
+ time_flushing = (int) (jsTime() - start);
+
+ globalFlushCounters.flushed(time_flushing);
+
+ log(1) << "flushing mmap took " << time_flushing << "ms" << endl;
}
}
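
The rewritten loop above charges the time spent in flushAll() against the next sleep, so flushes start roughly every _sleepsecs rather than drifting by the flush duration (and the flush is now synchronous, so that duration is actually measured). A compressed sketch of the timing logic, with the interval shrunk so the example runs quickly:

    #include <algorithm>
    #include <chrono>
    #include <iostream>
    #include <thread>

    // Sketch: keep the flush *period* constant by subtracting the previous
    // flush's duration from the sleep, clamped at zero.
    int main() {
        const double sleepsecs = 0.05;            // stand-in for the configured delay
        int time_flushing = 0;                    // ms spent in the last flush
        for ( int i = 0; i < 3; i++ ) {
            double ms = std::max( 0.0, sleepsecs * 1000 - time_flushing );
            std::this_thread::sleep_for( std::chrono::milliseconds( (long long) ms ) );
            auto start = std::chrono::steady_clock::now();
            // MemoryMappedFile::flushAll( true ) would run here
            auto end = std::chrono::steady_clock::now();
            time_flushing = (int) std::chrono::duration_cast<std::chrono::milliseconds>( end - start ).count();
            std::cout << "flush " << i << " took " << time_flushing << "ms\n";
        }
    }
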
@@ -445,14 +478,21 @@ namespace mongo {
bool is32bit = sizeof(int*) == 4;
log() << "Mongo DB : starting : pid = " << pid << " port = " << cmdLine.port << " dbpath = " << dbpath
- << " master = " << master << " slave = " << (int) slave << " " << ( is32bit ? "32" : "64" ) << "-bit " << endl;
-
+ << " master = " << replSettings.master << " slave = " << (int) replSettings.slave << " " << ( is32bit ? "32" : "64" ) << "-bit " << endl;
+ DEV log() << " FULL DEBUG ENABLED " << endl;
show_32_warning();
- stringstream ss;
- ss << "dbpath (" << dbpath << ") does not exist";
- massert( 10296 , ss.str().c_str(), boost::filesystem::exists( dbpath ) );
-
+ {
+ stringstream ss;
+ ss << "dbpath (" << dbpath << ") does not exist";
+ massert( 10296 , ss.str().c_str(), boost::filesystem::exists( dbpath ) );
+ }
+ {
+ stringstream ss;
+ ss << "repairpath (" << repairpath << ") does not exist";
+ massert( 12590 , ss.str().c_str(), boost::filesystem::exists( repairpath ) );
+ }
+
acquirePathLock();
remove_all( dbpath + "/_tmp/" );
@@ -461,11 +501,10 @@ namespace mongo {
BOOST_CHECK_EXCEPTION( clearTmpFiles() );
Client::initThread("initandlisten");
+ _diaglog.init();
clearTmpCollections();
- _diaglog.init();
-
Module::initAll();
#if 0
@@ -493,6 +532,7 @@ namespace mongo {
/* this is for security on certain platforms (nonce generation) */
srand((unsigned) (curTimeMicros() ^ startupSrandTimer.micros()));
+ snapshotThread.go();
listen(listenPort);
// listen() will return when exit code closes its socket.
@@ -557,6 +597,7 @@ string arg_error_check(int argc, char* argv[]) {
int main(int argc, char* argv[], char *envp[] )
{
+ static StaticObserver staticObserver;
getcurns = ourgetns;
po::options_description general_options("General options");
@@ -564,25 +605,17 @@ int main(int argc, char* argv[], char *envp[] )
po::options_description sharding_options("Sharding options");
po::options_description visible_options("Allowed options");
po::options_description hidden_options("Hidden options");
- po::options_description cmdline_options("Command line options");
po::positional_options_description positional_options;
+ CmdLine::addGlobalOptions( general_options , hidden_options );
+
general_options.add_options()
- ("help,h", "show this usage information")
- ("version", "show version information")
- ("config,f", po::value<string>(), "configuration file specifying additional options")
- ("port", po::value<int>(&cmdLine.port)/*->default_value(CmdLine::DefaultDBPort)*/, "specify port number")
("bind_ip", po::value<string>(&bind_ip),
"local ip address to bind listener - all local ips bound by default")
- ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
("dbpath", po::value<string>()->default_value("/data/db/"), "directory for datafiles")
- ("quiet", "quieter output")
- ("logpath", po::value<string>() , "file to send all output to instead of stdout" )
- ("logappend" , "appnd to logpath instead of over-writing" )
-#ifndef _WIN32
- ("fork" , "fork server process" )
-#endif
+ ("directoryperdb", "each database will be stored in a separate directory")
+ ("repairpath", po::value<string>() , "root directory for repair files - defaults to dbpath" )
("cpu", "periodically show cpu and iowait utilization")
("noauth", "run without security")
("auth", "run with security")
@@ -593,6 +626,7 @@ int main(int argc, char* argv[], char *envp[] )
("nocursors", "diagnostic/debugging option")
("nohints", "ignore query hints")
("nohttpinterface", "disable http interface")
+ ("rest","turn on simple rest api")
("noscripting", "disable scripting engine")
("noprealloc", "disable data file preallocation")
("smallfiles", "use a smaller default file size")
@@ -620,8 +654,10 @@ int main(int argc, char* argv[], char *envp[] )
("only", po::value<string>(), "when slave: specify a single database to replicate")
("pairwith", po::value<string>(), "address of server to pair with")
("arbiter", po::value<string>(), "address of arbiter server")
+ ("slavedelay", po::value<int>(), "specify delay (in seconds) to be used when applying master ops to slave")
+ ("fastsync", "indicate that this instance is starting from a dbpath snapshot of the repl peer")
("autoresync", "automatically resync if slave data is stale")
- ("oplogSize", po::value<long>(), "size limit (in MB) for op log")
+ ("oplogSize", po::value<int>(), "size limit (in MB) for op log")
("opIdMem", po::value<long>(), "size limit (in bytes) for in memory storage of op ids")
;
@@ -635,18 +671,12 @@ int main(int argc, char* argv[], char *envp[] )
("cacheSize", po::value<long>(), "cache size (in MB) for rec store")
;
- /* support for -vv -vvvv etc. */
- for (string s = "vv"; s.length() <= 10; s.append("v")) {
- hidden_options.add_options()(s.c_str(), "verbose");
- }
positional_options.add("command", 3);
visible_options.add(general_options);
visible_options.add(replication_options);
visible_options.add(sharding_options);
Module::addOptions( visible_options );
- cmdline_options.add(visible_options);
- cmdline_options.add(hidden_options);
setupSignals();
@@ -677,7 +707,7 @@ int main(int argc, char* argv[], char *envp[] )
bool removeService = false;
bool startService = false;
po::variables_map params;
-
+
string error_message = arg_error_check(argc, argv);
if (error_message != "") {
cout << error_message << endl << endl;
@@ -685,37 +715,9 @@ int main(int argc, char* argv[], char *envp[] )
return 0;
}
- /* don't allow guessing - creates ambiguities when some options are
- * prefixes of others. allow long disguises and don't allow guessing
- * to get away with our vvvvvvv trick. */
- int command_line_style = (((po::command_line_style::unix_style ^
- po::command_line_style::allow_guessing) |
- po::command_line_style::allow_long_disguise) ^
- po::command_line_style::allow_sticky);
- try {
- po::store(po::command_line_parser(argc, argv).options(cmdline_options).
- positional(positional_options).
- style(command_line_style).run(), params);
-
- if (params.count("config")) {
- ifstream config_file (params["config"].as<string>().c_str());
- if (config_file.is_open()) {
- po::store(po::parse_config_file(config_file, cmdline_options), params);
- config_file.close();
- } else {
- cout << "ERROR: could not read from config file" << endl << endl;
- cout << visible_options << endl;
- return 0;
- }
- }
-
- po::notify(params);
- } catch (po::error &e) {
- cout << "ERROR: " << e.what() << endl << endl;
- cout << visible_options << endl;
+ if ( ! CmdLine::store( argc , argv , visible_options , hidden_options , positional_options , params ) )
return 0;
- }
if (params.count("help")) {
show_help_text(visible_options);
@@ -727,16 +729,8 @@ int main(int argc, char* argv[], char *envp[] )
return 0;
}
dbpath = params["dbpath"].as<string>();
- if (params.count("quiet")) {
- cmdLine.quiet = true;
- }
- if (params.count("verbose")) {
- logLevel = 1;
- }
- for (string s = "vv"; s.length() <= 10; s.append("v")) {
- if (params.count(s)) {
- logLevel = s.length();
- }
+ if ( params.count("directoryperdb")) {
+ directoryperdb = true;
}
if (params.count("cpu")) {
cmdLine.cpu = true;
@@ -761,25 +755,11 @@ int main(int argc, char* argv[], char *envp[] )
/* casting away the const-ness here */
appsrvPath = (char*)(params["appsrvpath"].as<string>().c_str());
}
-#ifndef _WIN32
- if (params.count("fork")) {
- if ( ! params.count( "logpath" ) ){
- cout << "--fork has to be used with --logpath" << endl;
- return -1;
- }
- pid_t c = fork();
- if ( c ){
- cout << "forked process: " << c << endl;
- ::exit(0);
- }
- setsid();
- setupSignals();
- }
-#endif
- if (params.count("logpath")) {
- string lp = params["logpath"].as<string>();
- uassert( 10033 , "logpath has to be non-zero" , lp.size() );
- initLogging( lp , params.count( "logappend" ) );
+ if (params.count("repairpath")) {
+ repairpath = params["repairpath"].as<string>();
+ uassert( 12589, "repairpath has to be non-zero", repairpath.size() );
+ } else {
+ repairpath = dbpath;
}
if (params.count("nocursors")) {
useCursors = false;
@@ -790,6 +770,9 @@ int main(int argc, char* argv[], char *envp[] )
if (params.count("nohttpinterface")) {
noHttpInterface = true;
}
+ if (params.count("rest")) {
+ cmdLine.rest = true;
+ }
if (params.count("noscripting")) {
useJNI = false;
}
@@ -831,13 +814,19 @@ int main(int argc, char* argv[], char *envp[] )
startService = true;
}
if (params.count("master")) {
- master = true;
+ replSettings.master = true;
}
if (params.count("slave")) {
- slave = SimpleSlave;
+ replSettings.slave = SimpleSlave;
+ }
+ if (params.count("slavedelay")) {
+ replSettings.slavedelay = params["slavedelay"].as<int>();
+ }
+ if (params.count("fastsync")) {
+ replSettings.fastsync = true;
}
if (params.count("autoresync")) {
- autoresync = true;
+ replSettings.autoresync = true;
}
if (params.count("source")) {
/* specifies what the source in local.sources should be */
@@ -864,7 +853,7 @@ int main(int argc, char* argv[], char *envp[] )
assert(lenForNewNsFiles > 0);
}
if (params.count("oplogSize")) {
- long x = params["oplogSize"].as<long>();
+ long x = params["oplogSize"].as<int>();
uassert( 10035 , "bad --oplogSize arg", x > 0);
cmdLine.oplogSize = x * 1024 * 1024;
assert(cmdLine.oplogSize > 0);
@@ -872,8 +861,8 @@ int main(int argc, char* argv[], char *envp[] )
if (params.count("opIdMem")) {
long x = params["opIdMem"].as<long>();
uassert( 10036 , "bad --opIdMem arg", x > 0);
- opIdMem = x;
- assert(opIdMem > 0);
+ replSettings.opIdMem = x;
+ assert(replSettings.opIdMem > 0);
}
if (params.count("cacheSize")) {
long x = params["cacheSize"].as<long>();
@@ -974,13 +963,13 @@ namespace mongo {
#undef out
- void exitCleanly() {
+ void exitCleanly( ExitCode code ) {
goingAway = true;
killCurrentOp.killAll();
{
dblock lk;
log() << "now exiting" << endl;
- dbexit( EXIT_KILL );
+ dbexit( code );
}
}
@@ -1026,9 +1015,18 @@ namespace mongo {
int x;
sigwait( &asyncSignals, &x );
log() << "got kill or ctrl c signal " << x << " (" << strsignal( x ) << "), will terminate after current cmd ends" << endl;
- exitCleanly();
+ Client::initThread( "interruptThread" );
+ exitCleanly( EXIT_KILL );
}
+ // this will be called in certain c++ error cases, for example if there are two active
+ // exceptions
+ void myterminate() {
+ rawOut( "terminate() called, printing stack:\n" );
+ printStackTrace();
+ abort();
+ }
+
void setupSignals() {
assert( signal(SIGSEGV, abruptQuit) != SIG_ERR );
assert( signal(SIGFPE, abruptQuit) != SIG_ERR );
@@ -1044,12 +1042,15 @@ namespace mongo {
sigaddset( &asyncSignals, SIGTERM );
assert( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
boost::thread it( interruptThread );
+
+ set_terminate( myterminate );
}
#else
void ctrlCTerminate() {
- log() << "got kill or ctrl c signal, will terminate after current cmd ends" << endl;
- exitCleanly();
+ log() << "got kill or ctrl-c signal, will terminate after current cmd ends" << endl;
+ Client::initThread( "ctrlCTerminate" );
+ exitCleanly( EXIT_KILL );
}
BOOL CtrlHandler( DWORD fdwCtrlType )
{
@@ -1086,14 +1087,6 @@ BOOL CtrlHandler( DWORD fdwCtrlType )
}
#endif
-void temptestfoo() {
- MongoMutex m;
- m.lock();
-// m.lock_upgrade();
- m.lock_shared();
-}
-
-
} // namespace mongo
#include "recstore.h"
diff --git a/db/db.h b/db/db.h
index 3475f34..0bbc97b 100644
--- a/db/db.h
+++ b/db/db.h
@@ -18,7 +18,6 @@
#include "../stdafx.h"
#include "../util/message.h"
-#include "../util/top.h"
#include "boost/version.hpp"
#include "concurrency.h"
#include "pdfile.h"
@@ -47,16 +46,36 @@ namespace mongo {
*/
class DatabaseHolder {
public:
+ typedef map<string,Database*> DBs;
+ typedef map<string,DBs> Paths;
+
DatabaseHolder() : _size(0){
}
- Database * get( const string& ns , const string& path ){
+ bool isLoaded( const string& ns , const string& path ) const {
dbMutex.assertAtLeastReadLocked();
- map<string,Database*>& m = _paths[path];
+ Paths::const_iterator x = _paths.find( path );
+ if ( x == _paths.end() )
+ return false;
+ const DBs& m = x->second;
string db = _todb( ns );
- map<string,Database*>::iterator it = m.find(db);
+ DBs::const_iterator it = m.find(db);
+ return it != m.end();
+ }
+
+
+ Database * get( const string& ns , const string& path ) const {
+ dbMutex.assertAtLeastReadLocked();
+ Paths::const_iterator x = _paths.find( path );
+ if ( x == _paths.end() )
+ return 0;
+ const DBs& m = x->second;
+
+ string db = _todb( ns );
+
+ DBs::const_iterator it = m.find(db);
if ( it != m.end() )
return it->second;
return 0;
@@ -64,20 +83,42 @@ namespace mongo {
void put( const string& ns , const string& path , Database * db ){
dbMutex.assertWriteLocked();
- map<string,Database*>& m = _paths[path];
+ DBs& m = _paths[path];
Database*& d = m[_todb(ns)];
if ( ! d )
_size++;
d = db;
}
+ Database* getOrCreate( const string& ns , const string& path , bool& justCreated ){
+ dbMutex.assertWriteLocked();
+ DBs& m = _paths[path];
+
+ string dbname = _todb( ns );
+
+ Database* & db = m[dbname];
+ if ( db ){
+ justCreated = false;
+ return db;
+ }
+
+ log(1) << "Accessing: " << dbname << " for the first time" << endl;
+ db = new Database( dbname.c_str() , justCreated , path );
+ _size++;
+ return db;
+ }
+
+
+
+
void erase( const string& ns , const string& path ){
dbMutex.assertWriteLocked();
- map<string,Database*>& m = _paths[path];
- _size -= m.erase( _todb( ns ) );
+ DBs& m = _paths[path];
+ _size -= (int)m.erase( _todb( ns ) );
}
- bool closeAll( const string& path , BSONObjBuilder& result );
+ /* force - force close even if something underway - use at shutdown */
+ bool closeAll( const string& path , BSONObjBuilder& result, bool force );
int size(){
return _size;
@@ -86,107 +127,68 @@ namespace mongo {
/**
* gets all unique db names, ignoring paths
*/
- void getAllShortNames( set<string>& all ) const{
+ void getAllShortNames( set<string>& all ) const {
dbMutex.assertAtLeastReadLocked();
- for ( map<string, map<string,Database*> >::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ){
- map<string,Database*> m = i->second;
- for( map<string,Database*>::const_iterator j=m.begin(); j!=m.end(); j++ ){
+ for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ){
+ DBs m = i->second;
+ for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ){
all.insert( j->first );
}
}
}
-
+
private:
- string _todb( const string& ns ){
+ string _todb( const string& ns ) const {
size_t i = ns.find( '.' );
if ( i == string::npos )
return ns;
return ns.substr( 0 , i );
}
- map<string, map<string,Database*> > _paths;
+ Paths _paths;
int _size;
};
extern DatabaseHolder dbHolder;
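
DatabaseHolder now exposes its two-level layout (path -> db name -> Database*) through the Paths/DBs typedefs, and getOrCreate() folds the old lookup-then-insert dance into a single map access. A sketch of that access pattern with illustrative types:

    #include <iostream>
    #include <map>
    #include <string>

    // Sketch of getOrCreate(): one operator[] walk creates the slot, and a
    // null slot means this (path, dbname) pair was never opened before.
    struct DbSketch { std::string name; DbSketch( const std::string& n ) : name( n ) {} };
    typedef std::map<std::string, DbSketch*> DBs;
    typedef std::map<std::string, DBs> Paths;

    DbSketch* getOrCreate( Paths& paths, const std::string& path,
                           const std::string& dbname, bool& justCreated ) {
        DbSketch*& db = paths[ path ][ dbname ];   // reference into the slot
        justCreated = ( db == 0 );
        if ( justCreated )
            db = new DbSketch( dbname );           // first access: open it
        return db;
    }

    int main() {
        Paths paths;
        bool created;
        DbSketch* a = getOrCreate( paths, "/data/db", "test", created );
        std::cout << created << '\n';                        // 1: created
        DbSketch* b = getOrCreate( paths, "/data/db", "test", created );
        std::cout << created << ' ' << ( a == b ) << '\n';   // 0 1: cached
        delete a;
    }
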
- /* returns true if the database ("database") did not exist, and it was created on this call
- path - datafiles directory, if not the default, so we can differentiate between db's of the same
- name in different places (for example temp ones on repair).
- */
- inline bool setClient(const char *ns, const string& path , mongolock *lock ) {
- if( logLevel > 5 )
- log() << "setClient: " << ns << endl;
-
- dbMutex.assertAtLeastReadLocked();
-
- Client& c = cc();
- c.top.clientStart( ns );
-
- Database * db = dbHolder.get( ns , path );
- if ( db ){
- c.setns(ns, db );
- return false;
- }
-
- if( lock )
- lock->releaseAndWriteLock();
-
- assertInWriteLock();
-
- char cl[256];
- nsToDatabase(ns, cl);
- bool justCreated;
- Database *newdb = new Database(cl, justCreated, path);
- dbHolder.put(ns,path,newdb);
- c.setns(ns, newdb);
-
- newdb->finishInit();
-
- return justCreated;
- }
-
// shared functionality for removing references to a database from this program instance
// does not delete the files on disk
void closeDatabase( const char *cl, const string& path = dbpath );
-
+
struct dbtemprelease {
- string clientname;
- string clientpath;
- int locktype;
+ Client::Context * _context;
+ int _locktype;
+
dbtemprelease() {
- Client& client = cc();
- Database *database = client.database();
- if ( database ) {
- clientname = database->name;
- clientpath = database->path;
- }
- client.top.clientStop();
- locktype = dbMutex.getState();
- assert( locktype );
- if ( locktype > 0 ) {
- massert( 10298 , "can't temprelease nested write lock", locktype == 1);
+ _context = cc().getContext();
+ _locktype = dbMutex.getState();
+ assert( _locktype );
+
+ if ( _locktype > 0 ) {
+ massert( 10298 , "can't temprelease nested write lock", _locktype == 1);
+ if ( _context ) _context->unlocked();
dbMutex.unlock();
}
else {
- massert( 10299 , "can't temprelease nested read lock", locktype == -1);
+ massert( 10299 , "can't temprelease nested read lock", _locktype == -1);
+ if ( _context ) _context->unlocked();
dbMutex.unlock_shared();
}
+
}
~dbtemprelease() {
- if ( locktype > 0 )
+ if ( _locktype > 0 )
dbMutex.lock();
else
dbMutex.lock_shared();
- if ( clientname.empty() )
- cc().setns("", 0);
- else
- setClient(clientname.c_str(), clientpath.c_str());
+
+ if ( _context ) _context->relocked();
}
};
+
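
dbtemprelease now notifies the active Client::Context directly (unlocked() before dropping the mutex, relocked() after reacquiring it) instead of remembering the namespace by name and re-running setClient on the way back in. The overall shape is an inverted RAII guard - release in the constructor, reacquire in the destructor - sketched generically here with a std::mutex:

    #include <mutex>

    // Generic sketch of the dbtemprelease shape: give up a held lock for the
    // lifetime of the object, take it back in the destructor. The context
    // notification (unlocked()/relocked()) is elided.
    template <class Mutex>
    struct TempRelease {
        Mutex& _m;
        explicit TempRelease( Mutex& m ) : _m( m ) { _m.unlock(); }
        ~TempRelease() { _m.lock(); }
    };

    int main() {
        std::mutex m;
        m.lock();
        {
            TempRelease<std::mutex> t( m );   // lock released here
            // slow work that must not hold the db lock, e.g. network I/O
        }                                     // lock reacquired here
        m.unlock();
    }
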
/**
only does a temp release if we're not nested and have a lock
*/
@@ -212,7 +214,6 @@ namespace mongo {
extern TicketHolder connTicketHolder;
-
} // namespace mongo
//#include "dbinfo.h"
diff --git a/db/db.sln b/db/db.sln
index 35fd85f..79ff2e1 100644
--- a/db/db.sln
+++ b/db/db.sln
@@ -15,10 +15,7 @@ EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{2B262D59-9DC7-4BF1-A431-1BD4966899A5}"
ProjectSection(SolutionItems) = preProject
..\tools\bridge.cpp = ..\tools\bridge.cpp
- ..\tools\export.cpp = ..\tools\export.cpp
- ..\tools\files.cpp = ..\tools\files.cpp
..\tools\sniffer.cpp = ..\tools\sniffer.cpp
- ..\tools\tool.cpp = ..\tools\tool.cpp
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongos", "..\s\dbgrid.vcproj", "{E03717ED-69B4-4D21-BC55-DF6690B585C6}"
diff --git a/db/db.vcproj b/db/db.vcproj
index 6dc0aae..3ea7506 100644
--- a/db/db.vcproj
+++ b/db/db.vcproj
@@ -144,7 +144,7 @@
/>
<Tool
Name="VCLinkerTool"
- AdditionalDependencies="ws2_32.lib"
+ AdditionalDependencies="ws2_32.lib psapi.lib"
LinkIncremental="1"
AdditionalLibraryDirectories="&quot;c:\program files\boost\boost_1_35_0\lib&quot;"
GenerateDebugInformation="true"
@@ -350,48 +350,8 @@
>
</File>
<File
- RelativePath="..\..\js\js\Debug\js.lib"
+ RelativePath="..\..\js\src\js.lib"
>
- <FileConfiguration
- Name="Release|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
- </File>
- <File
- RelativePath="..\..\js\js\Release\js.lib"
- >
- <FileConfiguration
- Name="Debug|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Debug Recstore|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
- </File>
- <File
- RelativePath="C:\Program Files\Java\jdk\lib\jvm.lib"
- >
- <FileConfiguration
- Name="release_nojni|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
</File>
<File
RelativePath="..\pcre-7.4\pcrecpp.cc"
@@ -1342,30 +1302,18 @@
>
</File>
<File
- RelativePath="..\client\quorum.cpp"
+ RelativePath="..\client\syncclusterconnection.cpp"
>
</File>
- <Filter
- Name="btree related"
- >
- <File
- RelativePath=".\btree.cpp"
- >
- </File>
- <File
- RelativePath=".\btree.h"
- >
- </File>
- <File
- RelativePath=".\btreecursor.cpp"
- >
- </File>
- </Filter>
</Filter>
<Filter
Name="db"
>
<File
+ RelativePath=".\background.h"
+ >
+ </File>
+ <File
RelativePath=".\client.h"
>
</File>
@@ -1374,6 +1322,10 @@
>
</File>
<File
+ RelativePath=".\cmdline.cpp"
+ >
+ </File>
+ <File
RelativePath=".\cmdline.h"
>
</File>
@@ -1414,6 +1366,14 @@
>
</File>
<File
+ RelativePath=".\diskloc.h"
+ >
+ </File>
+ <File
+ RelativePath=".\index.h"
+ >
+ </File>
+ <File
RelativePath=".\introspect.h"
>
</File>
@@ -1485,6 +1445,10 @@
RelativePath="..\stdafx.h"
>
</File>
+ <File
+ RelativePath=".\update.h"
+ >
+ </File>
<Filter
Name="cpp"
Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
@@ -1507,6 +1471,10 @@
>
</File>
<File
+ RelativePath=".\common.cpp"
+ >
+ </File>
+ <File
RelativePath=".\cursor.cpp"
>
</File>
@@ -1539,10 +1507,6 @@
>
</File>
<File
- RelativePath=".\dbstats.cpp"
- >
- </File>
- <File
RelativePath=".\dbwebserver.cpp"
>
</File>
@@ -1555,6 +1519,10 @@
>
</File>
<File
+ RelativePath=".\index_geo2d.cpp"
+ >
+ </File>
+ <File
RelativePath=".\instance.cpp"
>
</File>
@@ -1671,10 +1639,6 @@
>
</File>
<File
- RelativePath="..\util\top.cpp"
- >
- </File>
- <File
RelativePath=".\update.cpp"
>
</File>
@@ -1884,6 +1848,42 @@
/>
</FileConfiguration>
</File>
+ <File
+ RelativePath="..\scripting\utils.cpp"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="stats"
+ >
+ <File
+ RelativePath=".\stats\counters.cpp"
+ >
+ </File>
+ <File
+ RelativePath=".\stats\snapshots.cpp"
+ >
+ </File>
+ <File
+ RelativePath=".\stats\top.cpp"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="btree related"
+ >
+ <File
+ RelativePath=".\btree.cpp"
+ >
+ </File>
+ <File
+ RelativePath=".\btree.h"
+ >
+ </File>
+ <File
+ RelativePath=".\btreecursor.cpp"
+ >
+ </File>
</Filter>
</Files>
<Globals>
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index ff072a1..6d1aa5a 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -36,7 +36,8 @@
#include "security.h"
#include "queryoptimizer.h"
#include "../scripting/engine.h"
-#include "dbstats.h"
+#include "stats/counters.h"
+#include "background.h"
namespace mongo {
@@ -56,13 +57,15 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream& help ) const {
help << "shutdown the database. must be ran against admin db and either (1) ran from localhost or (2) authenticated.\n";
}
CmdShutdown() : Command("shutdown") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ cc().shutdown();
log() << "terminating, shutdown command received" << endl;
- dbexit( EXIT_CLEAN );
+ dbexit( EXIT_CLEAN ); // this never returns
return true;
}
} cmdShutdown;
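
Throughout this file the old boolean readOnly() gives way to a three-state locktype() - NONE, READ or WRITE - declared per command, so the dispatcher can take the appropriate lock (or none at all) before calling run() rather than each command locking internally. A sketch of that contract with illustrative types:

    #include <iostream>

    // Sketch of locktype() dispatch: the command declares what it needs and
    // the caller, not the command body, takes the corresponding lock.
    enum LockType { NONE, READ, WRITE };

    struct CommandSketch {
        virtual ~CommandSketch() {}
        virtual LockType locktype() const = 0;
        virtual bool run() = 0;
    };

    bool dispatch( CommandSketch& c ) {
        switch ( c.locktype() ) {
        case NONE:  return c.run();                      // no db lock at all
        case READ:  /* readlock lk;  */ return c.run();  // shared lock
        case WRITE: /* writelock lk; */ return c.run();  // exclusive lock
        }
        return false;
    }

    struct PingSketch : CommandSketch {
        LockType locktype() const { return NONE; }
        bool run() { std::cout << "pong\n"; return true; }
    };

    int main() { PingSketch p; return dispatch( p ) ? 0 : 1; }
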
@@ -75,7 +78,7 @@ namespace mongo {
*/
class CmdResetError : public Command {
public:
- virtual bool readOnly() { return true; }
+ virtual LockType locktype(){ return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool logTheOp() {
return false;
@@ -98,7 +101,7 @@ namespace mongo {
/* for diagnostic / testing purposes. */
class CmdSleep : public Command {
public:
- virtual bool readOnly() { return true; }
+ virtual LockType locktype(){ return READ; }
virtual bool adminOnly() { return true; }
virtual bool logTheOp() {
return false;
@@ -118,7 +121,7 @@ namespace mongo {
class CmdGetLastError : public Command {
public:
- virtual bool readOnly() { return true; }
+ virtual LockType locktype(){ return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool logTheOp() {
return false;
@@ -155,6 +158,7 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return NONE; }
CmdForceError() : Command("forceerror") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
uassert( 10038 , "forced error", false);
@@ -164,7 +168,7 @@ namespace mongo {
class CmdGetPrevError : public Command {
public:
- virtual bool readOnly() { return true; }
+ virtual LockType locktype(){ return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool logTheOp() {
return false;
@@ -199,6 +203,7 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return NONE; }
CmdSwitchToClientErrors() : Command("switchtoclienterrors") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( lastError.getID() ){
@@ -223,9 +228,10 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdDropDatabase() : Command("dropDatabase") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONElement e = cmdObj.findElement(name);
+ BSONElement e = cmdObj.getField(name);
log() << "dropDatabase " << ns << endl;
int p = (int) e.number();
if ( p != 1 )
@@ -247,16 +253,17 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "repair database. also compacts. note: slow.";
}
+ virtual LockType locktype(){ return WRITE; }
CmdRepairDatabase() : Command("repairDatabase") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONElement e = cmdObj.findElement(name);
+ BSONElement e = cmdObj.getField(name);
log() << "repairDatabase " << ns << endl;
int p = (int) e.number();
if ( p != 1 )
return false;
- e = cmdObj.findElement( "preserveClonedFilesOnFailure" );
+ e = cmdObj.getField( "preserveClonedFilesOnFailure" );
bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
- e = cmdObj.findElement( "backupOriginalFiles" );
+ e = cmdObj.getField( "backupOriginalFiles" );
bool backupOriginalFiles = e.isBoolean() && e.boolean();
return repairDatabase( ns, errmsg, preserveClonedFilesOnFailure, backupOriginalFiles );
}
@@ -274,9 +281,10 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "enable or disable performance profiling";
}
+ virtual LockType locktype(){ return WRITE; }
CmdProfile() : Command("profile") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONElement e = cmdObj.findElement(name);
+ BSONElement e = cmdObj.getField(name);
result.append("was", (double) cc().database()->profile);
int p = (int) e.number();
bool ok = false;
@@ -302,9 +310,15 @@ namespace mongo {
CmdServerStatus() : Command("serverStatus") {
started = time(0);
}
+
+ virtual LockType locktype(){ return NONE; }
+
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+
+ bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
result.append("uptime",(double) (time(0)-started));
+ result.appendDate( "localTime" , jsTime() );
{
BSONObjBuilder t;
@@ -316,19 +330,19 @@ namespace mongo {
double tl = (double) timeLocked;
t.append("totalTime", tt);
t.append("lockTime", tl);
- t.append("ratio", tl/tt);
+ t.append("ratio", (tt ? tl/tt : 0));
result.append( "globalLock" , t.obj() );
}
- {
-
+ if ( authed ){
+
BSONObjBuilder t( result.subobjStart( "mem" ) );
ProcessInfo p;
if ( p.supported() ){
- t.append( "resident" , p.getResidentSize() );
- t.append( "virtual" , p.getVirtualMemorySize() );
+ t.appendNumber( "resident" , p.getResidentSize() );
+ t.appendNumber( "virtual" , p.getVirtualMemorySize() );
t.appendBool( "supported" , true );
}
else {
@@ -336,7 +350,7 @@ namespace mongo {
t.appendBool( "supported" , false );
}
- t.append( "mapped" , MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) );
+ t.appendNumber( "mapped" , MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) );
t.done();
@@ -348,7 +362,8 @@ namespace mongo {
bb.append( "available" , connTicketHolder.available() );
bb.done();
}
- {
+
+ if ( authed ){
BSONObjBuilder bb( result.subobjStart( "extra_info" ) );
bb.append("note", "fields vary by platform");
ProcessInfo p;
@@ -356,8 +371,40 @@ namespace mongo {
bb.done();
}
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "indexCounters" ) );
+ globalIndexCounters.append( bb );
+ bb.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "backgroundFlushing" ) );
+ globalFlushCounters.append( bb );
+ bb.done();
+ }
+
+ if ( anyReplEnabled() ){
+ BSONObjBuilder bb( result.subobjStart( "repl" ) );
+ appendReplicationInfo( bb , authed , cmdObj["repl"].numberInt() );
+ bb.done();
+ }
+
result.append( "opcounters" , globalOpCounters.getObj() );
+ {
+ BSONObjBuilder asserts( result.subobjStart( "asserts" ) );
+ asserts.append( "regular" , assertionCount.regular );
+ asserts.append( "warning" , assertionCount.warning );
+ asserts.append( "msg" , assertionCount.msg );
+ asserts.append( "user" , assertionCount.user );
+ asserts.append( "rollovers" , assertionCount.rollovers );
+ asserts.done();
+ }
+
+ if ( ! authed )
+ result.append( "note" , "run against admin for more info" );
+
return true;
}
time_t started;
@@ -372,6 +419,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "check if any asserts have occurred on the server";
}
+ virtual LockType locktype(){ return WRITE; }
CmdAssertInfo() : Command("assertinfo") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.appendBool("dbasserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet());
@@ -389,8 +437,10 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return NONE; }
CmdGetOpTime() : Command("getoptime") { }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ writelock l( "" );
result.appendDate("optime", OpTime::now().asDate());
return true;
}
@@ -416,6 +466,7 @@ namespace mongo {
bool adminOnly() {
return true;
}
+ virtual LockType locktype(){ return WRITE; }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
stringstream ss;
@@ -451,7 +502,12 @@ namespace mongo {
}
} dbc_unittest;
- bool deleteIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex ) {
+ void assureSysIndexesEmptied(const char *ns, IndexDetails *exceptForIdIndex);
+ int removeFromSysIndexes(const char *ns, const char *idxName);
+
+ bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex ) {
+
+ BackgroundOperation::assertNoBgOpInProgForNs(ns);
d->aboutToDeleteAnIndex();
@@ -479,7 +535,10 @@ namespace mongo {
}
/* assuming here that id index is not multikey: */
d->multiKeyIndexBits = 0;
- anObjBuilder.append("msg", "all indexes deleted for collection");
+ assureSysIndexesEmptied(ns, idIndex);
+ anObjBuilder.append("msg", mayDeleteIdIndex ?
+ "indexes dropped for collection" :
+ "non-_id indexes dropped for collection");
}
else {
// delete just one index
@@ -503,7 +562,11 @@ namespace mongo {
for ( int i = x; i < d->nIndexes; i++ )
d->idx(i) = d->idx(i+1);
} else {
- log() << "deleteIndexes: " << name << " not found" << endl;
+                        int n = removeFromSysIndexes(ns, name); // just in case there's an orphaned listing there - i.e. it should have been repaired but wasn't
+ if( n ) {
+ log() << "info: removeFromSysIndexes cleaned up " << n << " entries" << endl;
+ }
+ log() << "dropIndexes: " << name << " not found" << endl;
errmsg = "index not found";
return false;
}
@@ -524,8 +587,9 @@ namespace mongo {
virtual bool adminOnly() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string nsToDrop = cc().database()->name + '.' + cmdObj.findElement(name).valuestr();
+ string nsToDrop = cc().database()->name + '.' + cmdObj.getField(name).valuestr();
NamespaceDetails *d = nsdetails(nsToDrop.c_str());
if ( !cmdLine.quiet )
log() << "CMD: drop " << nsToDrop << endl;
@@ -542,14 +606,14 @@ namespace mongo {
/* select count(*) */
class CmdCount : public Command {
public:
- virtual bool readOnly() { return true; }
+ virtual LockType locktype(){ return READ; }
CmdCount() : Command("count") { }
virtual bool logTheOp() {
return false;
}
virtual bool slaveOk() {
// ok on --slave setups, not ok for nonmaster of a repl pair (unless override)
- return slave == SimpleSlave;
+ return replSettings.slave == SimpleSlave;
}
virtual bool slaveOverrideOk() {
return true;
@@ -558,7 +622,7 @@ namespace mongo {
return false;
}
virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string ns = cc().database()->name + '.' + cmdObj.findElement(name).valuestr();
+ string ns = cc().database()->name + '.' + cmdObj.getField(name).valuestr();
string err;
long long n = runCount(ns.c_str(), cmdObj, err);
long long nn = n;
@@ -591,11 +655,12 @@ namespace mongo {
virtual bool adminOnly() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream& help ) const {
help << "create a collection";
}
virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string ns = cc().database()->name + '.' + cmdObj.findElement(name).valuestr();
+ string ns = cc().database()->name + '.' + cmdObj.getField(name).valuestr();
string err;
bool ok = userCreateNS(ns.c_str(), cmdObj, err, true);
if ( !ok && !err.empty() )
@@ -604,7 +669,8 @@ namespace mongo {
}
} cmdCreate;
- class CmdDeleteIndexes : public Command {
+ /* "dropIndexes" is now the preferred form - "deleteIndexes" deprecated */
+ class CmdDropIndexes : public Command {
public:
virtual bool logTheOp() {
return true;
@@ -612,21 +678,34 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream& help ) const {
- help << "delete indexes for a collection";
+ help << "drop indexes for a collection";
}
- CmdDeleteIndexes() : Command("deleteIndexes") { }
+ CmdDropIndexes(const char *cmdname = "dropIndexes") : Command(cmdname) { }
bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
- /* note: temp implementation. space not reclaimed! */
- BSONElement e = jsobj.findElement(name.c_str());
+ BSONElement e = jsobj.getField(name.c_str());
string toDeleteNs = cc().database()->name + '.' + e.valuestr();
NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
if ( !cmdLine.quiet )
- log() << "CMD: deleteIndexes " << toDeleteNs << endl;
+ log() << "CMD: dropIndexes " << toDeleteNs << endl;
if ( d ) {
- BSONElement f = jsobj.findElement("index");
+ BSONElement f = jsobj.getField("index");
if ( f.type() == String ) {
- return deleteIndexes( d, toDeleteNs.c_str(), f.valuestr(), errmsg, anObjBuilder, false );
+ return dropIndexes( d, toDeleteNs.c_str(), f.valuestr(), errmsg, anObjBuilder, false );
+ }
+ else if ( f.type() == Object ){
+ int idxId = d->findIndexByKeyPattern( f.embeddedObject() );
+ if ( idxId < 0 ){
+ errmsg = "can't find index with key:";
+                        errmsg += f.embeddedObject().toString();
+ return false;
+ }
+ else {
+ IndexDetails& ii = d->idx( idxId );
+ string iName = ii.indexName();
+ return dropIndexes( d, toDeleteNs.c_str(), iName.c_str() , errmsg, anObjBuilder, false );
+ }
}
else {
errmsg = "invalid index name spec";
@@ -638,6 +717,10 @@ namespace mongo {
return false;
}
}
+ } cmdDropIndexes;
+ class CmdDeleteIndexes : public CmdDropIndexes {
+ public:
+ CmdDeleteIndexes() : CmdDropIndexes("deleteIndexes") { }
} cmdDeleteIndexes;
class CmdReIndex : public Command {
@@ -648,14 +731,17 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream& help ) const {
help << "re-index a collection";
}
CmdReIndex() : Command("reIndex") { }
bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ BackgroundOperation::assertNoBgOpInProgForNs(ns);
+
static DBDirectClient db;
- BSONElement e = jsobj.findElement(name.c_str());
+ BSONElement e = jsobj.getField(name.c_str());
string toDeleteNs = cc().database()->name + '.' + e.valuestr();
NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
log() << "CMD: reIndex " << toDeleteNs << endl;
@@ -675,9 +761,9 @@ namespace mongo {
}
- bool ok = deleteIndexes( d, toDeleteNs.c_str(), "*" , errmsg, result, true );
+ bool ok = dropIndexes( d, toDeleteNs.c_str(), "*" , errmsg, result, true );
if ( ! ok ){
- errmsg = "deleteIndexes failed";
+ errmsg = "dropIndexes failed";
return false;
}
@@ -693,8 +779,6 @@ namespace mongo {
}
} cmdReIndex;
-
-
class CmdListDatabases : public Command {
public:
virtual bool logTheOp() {
@@ -709,6 +793,7 @@ namespace mongo {
virtual bool adminOnly() {
return true;
}
+ virtual LockType locktype(){ return WRITE; }
CmdListDatabases() : Command("listDatabases") {}
bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
vector< string > dbNames;
@@ -722,8 +807,8 @@ namespace mongo {
b.append( "name", i->c_str() );
boost::intmax_t size = dbSize( i->c_str() );
b.append( "sizeOnDisk", (double) size );
- setClient( i->c_str() );
- b.appendBool( "empty", cc().database()->isEmpty() );
+ Client::Context ctx( *i );
+ b.appendBool( "empty", ctx.db()->isEmpty() );
totalSize += size;
dbInfos.push_back( b.obj() );
@@ -741,8 +826,8 @@ namespace mongo {
BSONObjBuilder b;
b << "name" << name << "sizeOnDisk" << double( 1 );
- setClient( name.c_str() );
- b.appendBool( "empty", cc().database()->isEmpty() );
+ Client::Context ctx( name );
+ b.appendBool( "empty", ctx.db()->isEmpty() );
dbInfos.push_back( b.obj() );
}
@@ -753,13 +838,17 @@ namespace mongo {
}
} cmdListDatabases;
+    /* note: an access to a database right after this will open it back up - so this is mainly
+       for diagnostic purposes.
+ */
class CmdCloseAllDatabases : public Command {
public:
virtual bool adminOnly() { return true; }
virtual bool slaveOk() { return false; }
+ virtual LockType locktype(){ return WRITE; }
CmdCloseAllDatabases() : Command( "closeAllDatabases" ) {}
bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
- return dbHolder.closeAll( dbpath , result );
+ return dbHolder.closeAll( dbpath , result, false );
}
} cmdCloseAllDatabases;
@@ -772,6 +861,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << " example: { filemd5 : ObjectId(aaaaaaa) , key : { ts : 1 } }";
}
+ virtual LockType locktype(){ return READ; }
bool run(const char *dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
static DBDirectClient db;
@@ -831,6 +921,7 @@ namespace mongo {
public:
CmdMedianKey() : Command( "medianKey" ) {}
virtual bool slaveOk() { return true; }
+ virtual LockType locktype(){ return READ; }
virtual void help( stringstream &help ) const {
help << " example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
"NOTE: This command may take awhile to run";
@@ -840,6 +931,8 @@ namespace mongo {
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+
+ Client::Context ctx( ns );
IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
if ( id == 0 )
@@ -872,6 +965,7 @@ namespace mongo {
public:
CmdDatasize() : Command( "datasize" ) {}
virtual bool slaveOk() { return true; }
+ virtual LockType locktype(){ return READ; }
virtual void help( stringstream &help ) const {
help <<
"\ndetermine data size for a set of data in a certain range"
@@ -885,9 +979,10 @@ namespace mongo {
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+ Client::Context ctx( ns );
+
auto_ptr< Cursor > c;
if ( min.isEmpty() && max.isEmpty() ) {
- setClient( ns );
c = theDataFileMgr.findAll( ns );
} else if ( min.isEmpty() || max.isEmpty() ) {
errmsg = "only one of min or max specified";
@@ -923,19 +1018,40 @@ namespace mongo {
}
} cmdDatasize;
+ namespace {
+ long long getIndexSizeForCollection(string db, string ns, BSONObjBuilder* details=NULL, int scale = 1 ){
+ DBDirectClient client;
+ auto_ptr<DBClientCursor> indexes =
+ client.query(db + ".system.indexes", QUERY( "ns" << ns));
+
+ long long totalSize = 0;
+ while (indexes->more()){
+ BSONObj index = indexes->nextSafe();
+ NamespaceDetails * nsd = nsdetails( (ns + ".$" + index["name"].valuestrsafe()).c_str() );
+ if (!nsd)
+ continue; // nothing to do here
+ totalSize += nsd->datasize;
+ if (details)
+ details->appendNumber(index["name"].valuestrsafe(), nsd->datasize / scale );
+ }
+ return totalSize;
+ }
+ }
+
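
getIndexSizeForCollection() above walks db.system.indexes for one collection and sums the datasize of each index's backing namespace (<ns>.$<indexName>), optionally recording per-index numbers divided by the same scale divisor the collstats command below accepts. Note the integer division: scaled values round down. A self-contained sketch of the sum-and-record shape, with a plain map standing in for BSONObjBuilder:

    #include <iostream>
    #include <map>
    #include <string>

    // Sketch: sum per-index sizes and optionally record each one, scaled.
    long long indexSizeFor( const std::map<std::string, long long>& indexSizes,
                            std::map<std::string, long long>* details = 0,
                            int scale = 1 ) {
        long long totalSize = 0;
        for ( std::map<std::string, long long>::const_iterator it = indexSizes.begin();
              it != indexSizes.end(); ++it ) {
            totalSize += it->second;
            if ( details )
                (*details)[ it->first ] = it->second / scale;   // rounds down
        }
        return totalSize;
    }

    int main() {
        std::map<std::string, long long> sizes;
        sizes[ "_id_" ] = 8LL * 1024 * 1024;
        sizes[ "x_1" ]  = 2LL * 1024 * 1024;
        std::map<std::string, long long> details;
        long long total = indexSizeFor( sizes, &details, 1024 * 1024 );
        std::cout << "totalIndexSize: " << total / ( 1024 * 1024 ) << " MB\n"
                  << "_id_: " << details[ "_id_" ] << " MB\n";
    }
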
class CollectionStats : public Command {
public:
CollectionStats() : Command( "collstats" ) {}
virtual bool slaveOk() { return true; }
+ virtual LockType locktype(){ return READ; }
virtual void help( stringstream &help ) const {
help << " example: { collstats:\"blog.posts\" } ";
}
- bool run(const char *dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
- string ns = dbname;
- if ( ns.find( "." ) != string::npos )
- ns = ns.substr( 0 , ns.find( "." ) );
- ns += ".";
- ns += jsobj.firstElement().valuestr();
+ bool run(const char *dbname_c, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ string dbname = dbname_c;
+ if ( dbname.find( "." ) != string::npos )
+ dbname = dbname.substr( 0 , dbname.find( "." ) );
+
+ string ns = dbname + "." + jsobj.firstElement().valuestr();
NamespaceDetails * nsd = nsdetails( ns.c_str() );
if ( ! nsd ){
@@ -944,12 +1060,25 @@ namespace mongo {
}
result.append( "ns" , ns.c_str() );
-
- result.append( "count" , nsd->nrecords );
- result.append( "size" , nsd->datasize );
- result.append( "storageSize" , nsd->storageSize() );
+
+ int scale = 1;
+ if ( jsobj["scale"].isNumber() )
+ scale = jsobj["scale"].numberInt();
+
+ result.appendNumber( "count" , nsd->nrecords );
+ result.appendNumber( "size" , nsd->datasize / scale );
+ int numExtents;
+ result.appendNumber( "storageSize" , nsd->storageSize( &numExtents ) / scale );
+ result.append( "numExtents" , numExtents );
result.append( "nindexes" , nsd->nIndexes );
+ result.append( "lastExtentSize" , nsd->lastExtentSize / scale );
+ result.append( "paddingFactor" , nsd->paddingFactor );
+ result.append( "flags" , nsd->flags );
+ BSONObjBuilder indexSizes;
+ result.appendNumber( "totalIndexSize" , getIndexSizeForCollection(dbname, ns, &indexSizes, scale) / scale );
+ result.append("indexSizes", indexSizes.obj());
+
if ( nsd->capped ){
result.append( "capped" , nsd->capped );
result.append( "max" , nsd->max );
@@ -959,11 +1088,70 @@ namespace mongo {
}
} cmdCollectionStatis;
+
+ class DBStats : public Command {
+ public:
+ DBStats() : Command( "dbstats" ) {}
+ virtual bool slaveOk() { return true; }
+ virtual LockType locktype(){ return READ; }
+ virtual void help( stringstream &help ) const {
+ help << " example: { dbstats:1 } ";
+ }
+ bool run(const char *dbname_c, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ string dbname = dbname_c;
+ if ( dbname.find( "." ) != string::npos )
+ dbname = dbname.substr( 0 , dbname.find( "." ) );
+
+ DBDirectClient client;
+ const list<string> collections = client.getCollectionNames(dbname);
+
+ long long ncollections = 0;
+ long long objects = 0;
+ long long size = 0;
+ long long storageSize = 0;
+ long long numExtents = 0;
+ long long indexes = 0;
+ long long indexSize = 0;
+
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it){
+ const string ns = *it;
+
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ if ( ! nsd ){
+ // should this assert here?
+ continue;
+ }
+
+ ncollections += 1;
+ objects += nsd->nrecords;
+ size += nsd->datasize;
+
+ int temp;
+ storageSize += nsd->storageSize( &temp );
+ numExtents += temp;
+
+ indexes += nsd->nIndexes;
+ indexSize += getIndexSizeForCollection(dbname, ns);
+ }
+
+ result.appendNumber( "collections" , ncollections );
+ result.appendNumber( "objects" , objects );
+ result.appendNumber( "dataSize" , size );
+ result.appendNumber( "storageSize" , storageSize);
+ result.appendNumber( "numExtents" , numExtents );
+ result.appendNumber( "indexes" , indexes );
+ result.appendNumber( "indexSize" , indexSize );
+
+ return true;
+ }
+ } cmdDBStats;
+
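
The new dbstats command aggregates the per-collection numbers that collstats reports into a single database-wide summary. A sketch of the accumulation loop over stand-in stats records:

    #include <iostream>
    #include <map>
    #include <string>

    // Sketch of the dbstats aggregation: walk every collection and sum the
    // per-namespace counters into one summary.
    struct CollStatsSketch { long long objects, datasize, storageSize, nindexes; };

    int main() {
        std::map<std::string, CollStatsSketch> colls = {
            { "test.foo", { 100, 4096, 8192, 2 } },
            { "test.bar", {  50, 2048, 8192, 1 } },
        };
        long long objects = 0, dataSize = 0, storageSize = 0, indexes = 0;
        for ( const auto& c : colls ) {
            objects     += c.second.objects;
            dataSize    += c.second.datasize;
            storageSize += c.second.storageSize;
            indexes     += c.second.nindexes;
        }
        std::cout << "collections: " << colls.size()
                  << " objects: "     << objects
                  << " dataSize: "    << dataSize
                  << " storageSize: " << storageSize
                  << " indexes: "     << indexes << '\n';
    }
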
class CmdBuildInfo : public Command {
public:
CmdBuildInfo() : Command( "buildinfo" ) {}
virtual bool slaveOk() { return true; }
virtual bool adminOnly() { return true; }
+ virtual LockType locktype(){ return NONE; }
virtual void help( stringstream &help ) const {
help << "example: { buildinfo:1 }";
}
@@ -974,10 +1162,12 @@ namespace mongo {
}
} cmdBuildInfo;
+ /* convertToCapped seems to use this */
class CmdCloneCollectionAsCapped : public Command {
public:
CmdCloneCollectionAsCapped() : Command( "cloneCollectionAsCapped" ) {}
virtual bool slaveOk() { return false; }
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream &help ) const {
help << "example: { cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
}
@@ -996,15 +1186,13 @@ namespace mongo {
string fromNs = string( realDbName ) + "." + from;
string toNs = string( realDbName ) + "." + to;
- massert( 10300 , "source collection " + fromNs + " does not exist", !setClient( fromNs.c_str() ) );
NamespaceDetails *nsd = nsdetails( fromNs.c_str() );
massert( 10301 , "source collection " + fromNs + " does not exist", nsd );
- long long excessSize = nsd->datasize - size * 2;
+ long long excessSize = nsd->datasize - size * 2; // datasize and extentSize can't be compared exactly, so add some padding to 'size'
DiskLoc extent = nsd->firstExtent;
- for( ; excessSize > 0 && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
+ for( ; excessSize > extent.ext()->length && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
excessSize -= extent.ext()->length;
- if ( excessSize > 0 )
- log( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
+ log( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
log( 6 ) << "excessSize: " << excessSize << endl;
}
DiskLoc startLoc = extent.ext()->firstRecord;
@@ -1012,15 +1200,13 @@ namespace mongo {
CursorId id;
{
auto_ptr< Cursor > c = theDataFileMgr.findAll( fromNs.c_str(), startLoc );
- ClientCursor *cc = new ClientCursor();
- cc->c = c;
- cc->ns = fromNs;
+ ClientCursor *cc = new ClientCursor(c, fromNs.c_str(), true);
cc->matcher.reset( new CoveredIndexMatcher( BSONObj(), fromjson( "{$natural:1}" ) ) );
id = cc->cursorid;
}
DBDirectClient client;
- setClient( toNs.c_str() );
+ Client::Context ctx( toNs );
BSONObjBuilder spec;
spec.appendBool( "capped", true );
spec.append( "size", double( size ) );
@@ -1037,14 +1223,22 @@ namespace mongo {
}
} cmdCloneCollectionAsCapped;
+ /* jan2010:
+ Converts the given collection to a capped collection w/ the specified size.
+       This command is rarely used, and is not currently supported in sharded
+       environments.
+ */
class CmdConvertToCapped : public Command {
public:
CmdConvertToCapped() : Command( "convertToCapped" ) {}
virtual bool slaveOk() { return false; }
+ virtual LockType locktype(){ return WRITE; }
virtual void help( stringstream &help ) const {
help << "example: { convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
}
bool run(const char *dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ BackgroundOperation::assertNoBgOpInProgForDb(dbname);
+
string from = jsobj.getStringField( "convertToCapped" );
long long size = (long long)jsobj.getField( "size" ).number();
@@ -1086,6 +1280,7 @@ namespace mongo {
class GroupCommand : public Command {
public:
GroupCommand() : Command("group"){}
+ virtual LockType locktype(){ return READ; }
virtual bool slaveOk() { return true; }
virtual void help( stringstream &help ) const {
help << "see http://www.mongodb.org/display/DOCS/Aggregation";
@@ -1260,7 +1455,7 @@ namespace mongo {
public:
DistinctCommand() : Command("distinct"){}
virtual bool slaveOk() { return true; }
-
+ virtual LockType locktype(){ return READ; }
virtual void help( stringstream &help ) const {
help << "{ distinct : 'collection name' , key : 'a.b' }";
}
@@ -1268,7 +1463,7 @@ namespace mongo {
bool run(const char *dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
static DBDirectClient db;
- string ns = cc().database()->name + '.' + cmdObj.findElement(name).valuestr();
+ string ns = cc().database()->name + '.' + cmdObj.getField(name).valuestr();
string key = cmdObj["key"].valuestrsafe();
BSONObj keyPattern = BSON( key << 1 );
@@ -1319,6 +1514,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
virtual bool run(const char *dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
static DBDirectClient db;
@@ -1355,23 +1551,232 @@ namespace mongo {
}
} cmdFindAndModify;
- bool commandIsReadOnly(BSONObj& _cmdobj) {
- BSONObj jsobj;
- {
- BSONElement e = _cmdobj.firstElement();
- if ( e.type() == Object && string("query") == e.fieldName() ) {
- jsobj = e.embeddedObject();
+ /* Returns client's uri */
+ class CmdWhatsMyUri : public Command {
+ public:
+ CmdWhatsMyUri() : Command("whatsmyuri") { }
+ virtual bool logTheOp() {
+            return false; // no modification is made, so there is nothing to log
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
+ virtual LockType locktype(){ return NONE; }
+ virtual bool requiresAuth() {
+ return false;
+ }
+ virtual void help( stringstream &help ) const {
+ help << "{whatsmyuri:1}";
+ }
+ virtual bool run(const char *dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ BSONObj info = cc().curop()->infoNoauth();
+ result << "you" << info[ "client" ];
+ return true;
+ }
+ } cmdWhatsMyUri;
+
+ /* For testing only, not for general use */
+ class GodInsert : public Command {
+ public:
+ GodInsert() : Command( "godinsert" ) { }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual LockType locktype() { return WRITE; }
+ virtual bool requiresAuth() {
+ return true;
+ }
+ virtual void help( stringstream &help ) const {
+ help << "[for testing only]";
+ }
+ virtual bool run(const char *dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ string coll = cmdObj[ "godinsert" ].valuestrsafe();
+ uassert( 13049, "godinsert must specify a collection", !coll.empty() );
+ string ns = nsToDatabase( dbname ) + "." + coll;
+ BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
+ DiskLoc loc = theDataFileMgr.insert( ns.c_str(), obj, true );
+ return true;
+ }
+ } cmdGodInsert;
+
+ class DBHashCmd : public Command {
+ public:
+ DBHashCmd() : Command( "dbhash" ){}
+ virtual bool slaveOk() { return true; }
+ virtual LockType locktype() { return READ; }
+ virtual bool run(const char * badns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string dbname = nsToDatabase( badns );
+
+ list<string> colls = _db.getCollectionNames( dbname );
+ colls.sort();
+
+ result.appendNumber( "numCollections" , (long long)colls.size() );
+
+ md5_state_t globalState;
+ md5_init(&globalState);
+
+ BSONObjBuilder bb( result.subobjStart( "collections" ) );
+ for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ){
+ string c = *i;
+ if ( c.find( ".system.profil" ) != string::npos )
+ continue;
+
+ auto_ptr<Cursor> cursor;
+
+ NamespaceDetails * nsd = nsdetails( c.c_str() );
+ int idNum = nsd->findIdIndex();
+ if ( idNum >= 0 ){
+ cursor.reset( new BtreeCursor( nsd , idNum , nsd->idx( idNum ) , BSONObj() , BSONObj() , false , 1 ) );
+ }
+ else if ( c.find( ".system." ) != string::npos ){
+ continue;
+ }
+ else if ( nsd->capped ){
+ cursor = findTableScan( c.c_str() , BSONObj() );
+ }
+ else {
+ bb.done();
+ errmsg = (string)"can't find _id index for: " + c;
+ return 0;
+ }
+
+ md5_state_t st;
+ md5_init(&st);
+
+ long long n = 0;
+ while ( cursor->ok() ){
+ BSONObj c = cursor->current();
+ md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
+ n++;
+ cursor->advance();
+ }
+ md5digest d;
+ md5_finish(&st, d);
+ string hash = digestToString( d );
+
+ bb.append( c.c_str() + ( dbname.size() + 1 ) , hash );
+
+ md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
}
- else {
- jsobj = _cmdobj;
+ bb.done();
+
+ md5digest d;
+ md5_finish(&globalState, d);
+ string hash = digestToString( d );
+
+ result.append( "md5" , hash );
+
+ return 1;
+ }
+
+ DBDirectClient _db;
+ } dbhashCmd;
+
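dbhash computes one digest per collection, over each record's raw BSON in _id-index (or natural, for capped) order, then folds the per-collection hex strings into a database-wide digest. The two-level shape, as a standalone sketch with FNV-1a standing in for the md5_* calls:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    typedef unsigned long long uint64;

    // FNV-1a is only a stand-in for MD5 here; the aggregation shape is the point.
    uint64 fnv1a(const std::string& data) {
        uint64 h = 14695981039346656037ULL;
        for (size_t i = 0; i < data.size(); ++i) {
            h ^= (unsigned char)data[i];
            h *= 1099511628211ULL;
        }
        return h;
    }

    int main() {
        std::vector<std::string> colls;
        colls.push_back("records of foo.a");       // stands in for one collection's records
        colls.push_back("records of foo.b");
        std::ostringstream global;                 // input to the database-wide digest
        for (size_t i = 0; i < colls.size(); ++i) {
            std::ostringstream hex;
            hex << std::hex << fnv1a(colls[i]);    // per-collection digest
            global << hex.str();                   // like md5_append( &globalState , hash , ... )
            std::cout << colls[i] << " -> " << hex.str() << "\n";
        }
        std::cout << "db-wide: " << std::hex << fnv1a(global.str()) << "\n";
        return 0;
    }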
+ /**
+     * this handles:
+     *   - auth
+     *   - locking
+     *   - context
+     * then calls run()
+ */
+ bool execCommand( Command * c ,
+ Client& client , int queryOptions ,
+ const char *ns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
+ bool fromRepl ){
+
+ string dbname = nsToDatabase( ns );
+
+ AuthenticationInfo *ai = client.getAuthenticationInfo();
+
+ if( c->adminOnly() && c->localHostOnlyIfNoAuth( cmdObj ) && noauth && !ai->isLocalHost ) {
+ result.append( "errmsg" ,
+ "unauthorized: this command must run from localhost when running db without auth" );
+ log() << "command denied: " << cmdObj.toString() << endl;
+ return false;
+ }
+
+
+ if ( c->adminOnly() && ! fromRepl && dbname != "admin" ) {
+ result.append( "errmsg" , "access denied" );
+ log() << "command denied: " << cmdObj.toString() << endl;
+ return false;
+ }
+
+ if ( cmdObj["help"].trueValue() ){
+ stringstream ss;
+ ss << "help for: " << c->name << " ";
+ c->help( ss );
+ result.append( "help" , ss.str() );
+ result.append( "lockType" , c->locktype() );
+ return true;
+ }
+
+ bool canRunHere =
+ isMaster( dbname.c_str() ) ||
+ c->slaveOk() ||
+ ( c->slaveOverrideOk() && ( queryOptions & QueryOption_SlaveOk ) ) ||
+ fromRepl;
+
+ if ( ! canRunHere ){
+ result.append( "errmsg" , "not master" );
+ return false;
+ }
+
+ if ( c->locktype() == Command::NONE ){
+ // we also trust that this won't crash
+ string errmsg;
+ int ok = c->run( ns , cmdObj , errmsg , result , fromRepl );
+ if ( ! ok )
+ result.append( "errmsg" , errmsg );
+ return ok;
+ }
+
+ bool needWriteLock = c->locktype() == Command::WRITE;
+
+ if ( ! c->requiresAuth() &&
+ ( ai->isAuthorizedReads( dbname ) &&
+ ! ai->isAuthorized( dbname ) ) ){
+ // this means that they can read, but not write
+ // so only get a read lock
+ needWriteLock = false;
+ }
+
+ if ( ! needWriteLock ){
+ assert( ! c->logTheOp() );
+ }
+
+ mongolock lk( needWriteLock );
+ Client::Context ctx( ns , dbpath , &lk , c->requiresAuth() );
+
+ if ( c->adminOnly() )
+ log( 2 ) << "command: " << cmdObj << endl;
+
+ try {
+ string errmsg;
+ if ( ! c->run(ns, cmdObj, errmsg, result, fromRepl ) ){
+ result.append( "errmsg" , errmsg );
+ return false;
}
}
- BSONElement e = jsobj.firstElement();
- if ( ! e.type() )
+ catch ( AssertionException& e ){
+ stringstream ss;
+ ss << "assertion: " << e.what();
+ result.append( "errmsg" , ss.str() );
return false;
- return Command::readOnly( e.fieldName() );
+ }
+
+ if ( c->logTheOp() && ! fromRepl ){
+ logOp("c", ns, cmdObj);
+ }
+
+ return true;
}
+
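For reference, the minimal shape execCommand() expects from a Command subclass; "pingish" is a hypothetical name used only for this sketch:

    class PingIsh : public Command {
    public:
        PingIsh() : Command( "pingish" ) {}
        virtual bool slaveOk() { return true; }        // may run on non-masters
        virtual LockType locktype(){ return NONE; }    // execCommand takes no db lock before run()
        virtual void help( stringstream &help ) const {
            help << "{pingish:1}";
        }
        bool run(const char *ns, BSONObj& cmdObj, string& errmsg,
                 BSONObjBuilder& result, bool fromRepl ){
            result.append( "info" , "pong" );
            return true;
        }
    } cmdPingIsh;  // the static instance registers the command by name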
/* TODO make these all command objects -- legacy stuff here
usage:
@@ -1380,9 +1785,11 @@ namespace mongo {
returns true if ran a cmd
*/
bool _runCommands(const char *ns, BSONObj& _cmdobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
+ string dbname = nsToDatabase( ns );
+
if( logLevel >= 1 )
log() << "run command " << ns << ' ' << _cmdobj << endl;
-
+
const char *p = strchr(ns, '.');
if ( !p ) return false;
if ( strcmp(p, ".$cmd") != 0 ) return false;
@@ -1398,59 +1805,14 @@ namespace mongo {
}
}
+ Client& client = cc();
bool ok = false;
BSONElement e = jsobj.firstElement();
-
+
Command * c = e.type() ? Command::findCommand( e.fieldName() ) : 0;
if ( c ){
- string errmsg;
- AuthenticationInfo *ai = currentClient.get()->ai;
- uassert( 10045 , "unauthorized", ai->isAuthorized(cc().database()->name.c_str()) || !c->requiresAuth());
-
- bool admin = c->adminOnly();
-
- if( admin && c->localHostOnlyIfNoAuth(jsobj) && noauth && !ai->isLocalHost ) {
- ok = false;
- errmsg = "unauthorized: this command must run from localhost when running db without auth";
- log() << "command denied: " << jsobj.toString() << endl;
- }
- else if ( admin && !fromRepl && strncmp(ns, "admin", 5) != 0 ) {
- ok = false;
- errmsg = "access denied";
- log() << "command denied: " << jsobj.toString() << endl;
- }
- else if ( isMaster() ||
- c->slaveOk() ||
- ( c->slaveOverrideOk() && ( queryOptions & QueryOption_SlaveOk ) ) ||
- fromRepl ){
- if ( jsobj.getBoolField( "help" ) ) {
- stringstream help;
- help << "help for: " << e.fieldName() << " ";
- c->help( help );
- anObjBuilder.append( "help" , help.str() );
- }
- else {
- if( admin )
- log( 2 ) << "command: " << jsobj << endl;
- try {
- ok = c->run(ns, jsobj, errmsg, anObjBuilder, fromRepl);
- }
- catch ( AssertionException& e ){
- ok = false;
- errmsg = "assertion: ";
- errmsg += e.what();
- }
- if ( ok && c->logTheOp() && !fromRepl )
- logOp("c", ns, jsobj);
- }
- }
- else {
- ok = false;
- errmsg = "not master";
- }
- if ( !ok )
- anObjBuilder.append("errmsg", errmsg);
+ ok = execCommand( c , client , queryOptions , ns , jsobj , anObjBuilder , fromRepl );
}
else {
anObjBuilder.append("errmsg", "no such cmd");
diff --git a/db/dbcommands_admin.cpp b/db/dbcommands_admin.cpp
index 91052bf..7265002 100644
--- a/db/dbcommands_admin.cpp
+++ b/db/dbcommands_admin.cpp
@@ -31,15 +31,36 @@
#include "btree.h"
#include "curop.h"
#include "../util/background.h"
+#include "../scripting/engine.h"
namespace mongo {
+ class FeaturesCmd : public Command {
+ public:
+ FeaturesCmd() : Command( "features" ){}
+
+ virtual bool slaveOk(){ return true; }
+ virtual bool readOnly(){ return true; }
+ virtual LockType locktype(){ return READ; }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ result.append( "readlock" , readLockSupported() );
+ if ( globalScriptEngine ){
+ BSONObjBuilder bb( result.subobjStart( "js" ) );
+ result.append( "utf8" , globalScriptEngine->utf8Ok() );
+ bb.done();
+ }
+ return true;
+ }
+
+ } featuresCmd;
+
class CleanCmd : public Command {
public:
CleanCmd() : Command( "clean" ){}
virtual bool slaveOk(){ return true; }
-
+ virtual LockType locktype(){ return WRITE; }
+
bool run(const char *nsRaw, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
string dropns = cc().database()->name + "." + cmdObj.firstElement().valuestrsafe();
@@ -70,6 +91,7 @@ namespace mongo {
return true;
}
+ virtual LockType locktype(){ return WRITE; }
//{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] } */
bool run(const char *nsRaw, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
@@ -159,7 +181,7 @@ namespace mongo {
nlen += r->netLength();
c->advance();
}
- if ( d->capped ) {
+ if ( d->capped && !d->capLooped() ) {
ss << " capped outOfOrder:" << outOfOrder;
if ( outOfOrder > 1 ) {
valid = false;
@@ -252,7 +274,7 @@ namespace mongo {
extern bool unlockRequested;
extern unsigned lockedForWriting;
- extern boost::mutex lockedForWritingMutex;
+ extern mongo::mutex lockedForWritingMutex;
/*
class UnlockCommand : public Command {
@@ -283,8 +305,10 @@ namespace mongo {
class LockDBJob : public BackgroundJob {
protected:
void run() {
+ Client::initThread("fsyncjob");
+ Client& c = cc();
{
- boostlock lk(lockedForWritingMutex);
+ scoped_lock lk(lockedForWritingMutex);
lockedForWriting++;
}
readlock lk("");
@@ -299,9 +323,10 @@ namespace mongo {
sleepmillis(20);
}
{
- boostlock lk(lockedForWritingMutex);
+ scoped_lock lk(lockedForWritingMutex);
lockedForWriting--;
}
+ c.shutdown();
}
public:
bool& _ready;
@@ -312,7 +337,7 @@ namespace mongo {
};
public:
FSyncCommand() : Command( "fsync" ){}
-
+ virtual LockType locktype(){ return WRITE; }
virtual bool slaveOk(){ return true; }
virtual bool adminOnly(){ return true; }
/*virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
@@ -351,6 +376,18 @@ namespace mongo {
}
} fsyncCmd;
-
+
+ class LogRotateCmd : public Command {
+ public:
+ LogRotateCmd() : Command( "logRotate" ){}
+ virtual LockType locktype(){ return NONE; }
+ virtual bool slaveOk(){ return true; }
+ virtual bool adminOnly(){ return true; }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ rotateLogs();
+ return 1;
+ }
+
+ } logRotateCmd;
}
diff --git a/db/dbeval.cpp b/db/dbeval.cpp
index e729135..a3be894 100644
--- a/db/dbeval.cpp
+++ b/db/dbeval.cpp
@@ -73,7 +73,7 @@ namespace mongo {
BSONObj args;
{
- BSONElement argsElement = cmd.findElement("args");
+ BSONElement argsElement = cmd.getField("args");
if ( argsElement.type() == Array ) {
args = argsElement.embeddedObject();
if ( edebug ) {
@@ -111,8 +111,16 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+        // We need at least read-only access to run db.eval - auth for eval'd writes will be checked
+ // as they are requested.
+ virtual bool requiresAuth() {
+ return false;
+ }
+ virtual LockType locktype(){ return WRITE; }
CmdEval() : Command("$eval") { }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ uassert( 12598 , "$eval reads unauthorized", ai->isAuthorizedReads(cc().database()->name.c_str()));
return dbEval(ns, cmdObj, result, errmsg);
}
} cmdeval;
diff --git a/db/dbmessage.h b/db/dbmessage.h
index 54a2ac3..ba5cf94 100644
--- a/db/dbmessage.h
+++ b/db/dbmessage.h
@@ -16,7 +16,7 @@
#pragma once
-#include "storage.h"
+#include "diskloc.h"
#include "jsobj.h"
#include "namespace.h"
#include "../util/message.h"
@@ -133,8 +133,10 @@ namespace mongo {
return nextjsobj != 0;
}
BSONObj nextJsObj() {
- if ( nextjsobj == data )
+ if ( nextjsobj == data ) {
nextjsobj += strlen(data) + 1; // skip namespace
+ massert( 13066 , "Message contains no documents", theEnd > nextjsobj );
+ }
massert( 10304 , "Remaining data too small for BSON object", theEnd - nextjsobj > 3 );
BSONObj js(nextjsobj);
massert( 10305 , "Invalid object size", js.objsize() > 3 );
@@ -180,7 +182,7 @@ namespace mongo {
int ntoreturn;
int queryOptions;
BSONObj query;
- auto_ptr< FieldMatcher > fields;
+ BSONObj fields;
/* parses the message into the above fields */
QueryMessage(DbMessage& d) {
@@ -189,11 +191,7 @@ namespace mongo {
ntoreturn = d.pullInt();
query = d.nextJsObj();
if ( d.moreJSObjs() ) {
- BSONObj o = d.nextJsObj();
- if (!o.isEmpty()){
- fields = auto_ptr< FieldMatcher >(new FieldMatcher() );
- fields->add( o );
- }
+ fields = d.nextJsObj();
}
queryOptions = d.msg().data->dataAsInt();
}
@@ -222,9 +220,8 @@ namespace mongo {
qr->startingFrom = startingFrom;
qr->nReturned = nReturned;
b.decouple();
- Message *resp = new Message();
- resp->setData(qr, true); // transport will free
- p->reply(requestMsg, *resp, requestMsg.data->id);
+ Message resp(qr, true);
+ p->reply(requestMsg, resp, requestMsg.data->id);
}
} // namespace mongo
diff --git a/db/dbstats.cpp b/db/dbstats.cpp
deleted file mode 100644
index 902b57b..0000000
--- a/db/dbstats.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// dbstats.cpp
-
-#include "stdafx.h"
-#include "dbstats.h"
-
-namespace mongo {
-
- OpCounters::OpCounters(){
- int zero = 0;
-
- BSONObjBuilder b;
- b.append( "insert" , zero );
- b.append( "query" , zero );
- b.append( "update" , zero );
- b.append( "delete" , zero );
- b.append( "getmore" , zero );
- _obj = b.obj();
-
- _insert = (int*)_obj["insert"].value();
- _query = (int*)_obj["query"].value();
- _update = (int*)_obj["update"].value();
- _delete = (int*)_obj["delete"].value();
- _getmore = (int*)_obj["getmore"].value();
- }
-
- void OpCounters::gotOp( int op ){
- switch ( op ){
- case dbInsert: gotInsert(); break;
- case dbQuery: gotQuery(); break;
- case dbUpdate: gotUpdate(); break;
- case dbDelete: gotDelete(); break;
- case dbGetMore: gotGetMore(); break;
- case dbKillCursors:
- case opReply:
- case dbMsg:
- break;
- default: log() << "OpCounters::gotOp unknown op: " << op << endl;
- }
- }
-
-
- OpCounters globalOpCounters;
-}
diff --git a/db/dbstats.h b/db/dbstats.h
deleted file mode 100644
index c7d6340..0000000
--- a/db/dbstats.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// dbstats.h
-
-#include "../stdafx.h"
-#include "jsobj.h"
-#include "../util/message.h"
-
-namespace mongo {
-
- /**
- * for storing operation counters
- * note: not thread safe. ok with that for speed
- */
- class OpCounters {
- public:
-
- OpCounters();
-
- int * getInsert(){ return _insert; }
- int * getQuery(){ return _query; }
- int * getUpdate(){ return _update; }
- int * getDelete(){ return _delete; }
- int * getGetGore(){ return _getmore; }
-
- void gotInsert(){ _insert[0]++; }
- void gotQuery(){ _query[0]++; }
- void gotUpdate(){ _update[0]++; }
- void gotDelete(){ _delete[0]++; }
- void gotGetMore(){ _getmore[0]++; }
-
- void gotOp( int op );
-
- BSONObj& getObj(){ return _obj; }
- private:
- BSONObj _obj;
- int * _insert;
- int * _query;
- int * _update;
- int * _delete;
- int * _getmore;
- };
-
- extern OpCounters globalOpCounters;
-
-}
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 0e1483c..75d3a92 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -27,6 +27,9 @@
#include "replset.h"
#include "instance.h"
#include "security.h"
+#include "stats/snapshots.h"
+#include "background.h"
+#include "commands.h"
#include <pcrecpp.h>
#include <boost/date_time/posix_time/posix_time.hpp>
@@ -61,48 +64,6 @@ namespace mongo {
}
unsigned long long start, timeLocked;
};
- Timing tlast;
- const int NStats = 32;
- string lockStats[NStats];
- unsigned q = 0;
-
- void statsThread() {
- /*cout << "TEMP disabled statsthread" << endl;
- if( 1 )
- return;*/
- Client::initThread("stats");
- unsigned long long timeLastPass = 0;
- while ( 1 ) {
- {
- /* todo: do we even need readlock here? if so for what? */
- readlock lk("");
- Top::completeSnapshot();
- q = (q+1)%NStats;
- Timing timing;
- dbMutex.info().getTimingInfo(timing.start, timing.timeLocked);
- unsigned long long now = curTimeMicros64();
- if ( timeLastPass ) {
- unsigned long long dt = now - timeLastPass;
- unsigned long long dlocked = timing.timeLocked - tlast.timeLocked;
- {
- stringstream ss;
- ss << dt / 1000 << '\t';
- ss << dlocked / 1000 << '\t';
- if ( dt )
- ss << (dlocked*100)/dt << '%';
- string s = ss.str();
- if ( cmdLine.cpu )
- log() << "cpu: " << s << endl;
- lockStats[q] = s;
- ClientCursor::idleTimeReport( (unsigned) ((dt - dlocked)/1000) );
- }
- }
- timeLastPass = now;
- tlast = timing;
- }
- sleepsecs(4);
- }
- }
bool _bold;
string bold(bool x) {
@@ -118,14 +79,11 @@ namespace mongo {
// caller locks
void doLockedStuff(stringstream& ss) {
ss << "# databases: " << dbHolder.size() << '\n';
- if ( cc().database() ) {
- ss << "curclient: " << cc().database()->name; // TODO: isn't this useless?
- ss << '\n';
- }
+
ss << bold(ClientCursor::byLocSize()>10000) << "Cursors byLoc.size(): " << ClientCursor::byLocSize() << bold() << '\n';
ss << "\n<b>replication</b>\n";
- ss << "master: " << master << '\n';
- ss << "slave: " << slave << '\n';
+ ss << "master: " << replSettings.master << '\n';
+ ss << "slave: " << replSettings.slave << '\n';
if ( replPair ) {
ss << "replpair:\n";
ss << replPair->getInfo();
@@ -135,26 +93,76 @@ namespace mongo {
ss << "initialSyncCompleted: " << seemCaughtUp;
if ( !seemCaughtUp ) ss << "</b>";
ss << '\n';
-
- ss << "\n<b>DBTOP</b>\n";
- ss << "<table border=1><tr align='left'><th>Namespace</th><th>%</th><th>Reads</th><th>Writes</th><th>Calls</th><th>Time</th>";
- vector< Top::Usage > usage;
- Top::usage( usage );
- for( vector< Top::Usage >::iterator i = usage.begin(); i != usage.end(); ++i )
- ss << setprecision( 2 ) << fixed << "<tr><td>" << i->ns << "</td><td>" << i->pct << "</td><td>"
- << i->reads << "</td><td>" << i->writes << "</td><td>" << i->calls << "</td><td>" << i->time << "</td></tr>\n";
- ss << "</table>";
- ss << "\n<b>dt\ttlocked</b>\n";
- unsigned i = q;
- while ( 1 ) {
- ss << lockStats[i] << '\n';
- i = (i-1)%NStats;
- if ( i == q )
- break;
+ auto_ptr<SnapshotDelta> delta = statsSnapshots.computeDelta();
+ if ( delta.get() ){
+                ss << "\n<b>DBTOP (occurrences|percent of elapsed)</b>\n";
+ ss << "<table border=1>";
+ ss << "<tr align='left'>";
+ ss << "<th>NS</th>"
+ "<th colspan=2>total</th>"
+ "<th colspan=2>Reads</th>"
+ "<th colspan=2>Writes</th>"
+ "<th colspan=2>Queries</th>"
+ "<th colspan=2>GetMores</th>"
+ "<th colspan=2>Inserts</th>"
+ "<th colspan=2>Updates</th>"
+ "<th colspan=2>Removes</th>";
+ ss << "</tr>";
+
+ display( ss , (double) delta->elapsed() , "GLOBAL" , delta->globalUsageDiff() );
+
+ Top::UsageMap usage = delta->collectionUsageDiff();
+ for ( Top::UsageMap::iterator i=usage.begin(); i != usage.end(); i++ ){
+ display( ss , (double) delta->elapsed() , i->first , i->second );
+ }
+
+ ss << "</table>";
}
+
+ statsSnapshots.outputLockInfoHTML( ss );
+
+ BackgroundOperation::dump(ss);
}
+ void display( stringstream& ss , double elapsed , const Top::UsageData& usage ){
+ ss << "<td>";
+ ss << usage.count;
+ ss << "</td><td>";
+ double per = 100 * ((double)usage.time)/elapsed;
+ ss << setprecision(2) << fixed << per << "%";
+ ss << "</td>";
+ }
+
+ void display( stringstream& ss , double elapsed , const string& ns , const Top::CollectionData& data ){
+ if ( ns != "GLOBAL" && data.total.count == 0 )
+ return;
+ ss << "<tr><th>" << ns << "</th>";
+
+ display( ss , elapsed , data.total );
+
+ display( ss , elapsed , data.readLock );
+ display( ss , elapsed , data.writeLock );
+
+ display( ss , elapsed , data.queries );
+ display( ss , elapsed , data.getmore );
+ display( ss , elapsed , data.insert );
+ display( ss , elapsed , data.update );
+ display( ss , elapsed , data.remove );
+
+ ss << "</tr>";
+ }
+
+ void tablecell( stringstream& ss , bool b ){
+ ss << "<td>" << (b ? "<b>X</b>" : "") << "</td>";
+ }
+
+
+ template< typename T>
+ void tablecell( stringstream& ss , const T& t ){
+ ss << "<td>" << t << "</td>";
+ }
+
void doUnlockedStuff(stringstream& ss) {
/* this is in the header already ss << "port: " << port << '\n'; */
ss << mongodVersion() << "\n";
@@ -178,21 +186,51 @@ namespace mongo {
ss << "\nreplInfo: " << replInfo << "\n\n";
ss << "Clients:\n";
- ss << "<table border=1><tr align='left'><th>Thread</th><th>Current op</th>\n";
+ ss << "<table border=1>";
+ ss << "<tr align='left'>"
+ << "<th>Thread</th>"
+
+ << "<th>OpId</th>"
+ << "<th>Active</th>"
+ << "<th>LockType</th>"
+ << "<th>Waiting</th>"
+ << "<th>SecsRunning</th>"
+ << "<th>Op</th>"
+ << "<th>NameSpace</th>"
+ << "<th>Query</th>"
+ << "<th>client</th>"
+ << "<th>msg</th>"
+ << "<th>progress</th>"
+
+ << "</tr>\n";
{
- boostlock bl(Client::clientsMutex);
+ scoped_lock bl(Client::clientsMutex);
for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
Client *c = *i;
CurOp& co = *(c->curop());
- ss << "<tr><td>" << c->desc() << "</td><td";
- BSONObj info = co.infoNoauth();
- /*
- if( info.getIntField("inLock") > 0 )
- ss << "style='color:red'";
- else if( info.getIntField("inLock") < 0 )
- ss << "style='color:green'";
- */
- ss << ">" << info << "</td></tr>\n";
+ ss << "<tr><td>" << c->desc() << "</td>";
+
+ tablecell( ss , co.opNum() );
+ tablecell( ss , co.active() );
+ tablecell( ss , co.getLockType() );
+ tablecell( ss , co.isWaitingForLock() );
+ if ( co.active() )
+ tablecell( ss , co.elapsedSeconds() );
+ else
+ tablecell( ss , "" );
+ tablecell( ss , co.getOp() );
+ tablecell( ss , co.getNS() );
+ if ( co.haveQuery() )
+ tablecell( ss , co.query() );
+ else
+ tablecell( ss , "" );
+ tablecell( ss , co.getRemoteString() );
+
+ tablecell( ss , co.getMessage() );
+ tablecell( ss , co.getProgressMeter().toString() );
+
+
+ ss << "</tr>";
}
}
ss << "</table>\n";
@@ -203,7 +241,7 @@ namespace mongo {
if ( from.localhost() )
return true;
- if ( db.findOne( "admin.system.users" , BSONObj() ).isEmpty() )
+ if ( db.findOne( "admin.system.users" , BSONObj() , 0 , QueryOption_SlaveOk ).isEmpty() )
return true;
string auth = getHeader( rq , "Authorization" );
@@ -270,6 +308,23 @@ namespace mongo {
//out() << "url [" << url << "]" << endl;
if ( url.size() > 1 ) {
+
+ if ( url.find( "/_status" ) == 0 ){
+ if ( ! allowed( rq , headers, from ) ){
+ responseCode = 401;
+ responseMsg = "not allowed\n";
+ return;
+ }
+ generateServerStatus( url , responseMsg );
+ responseCode = 200;
+ return;
+ }
+
+ if ( ! cmdLine.rest ){
+ responseCode = 403;
+ responseMsg = "rest is not enabled. use --rest to turn on";
+ return;
+ }
if ( ! allowed( rq , headers, from ) ){
responseCode = 401;
responseMsg = "not allowed\n";
@@ -294,23 +349,18 @@ namespace mongo {
doUnlockedStuff(ss);
- int n = 2000;
- Timer t;
- while ( 1 ) {
- if ( !dbMutex.info().isLocked() ) {
- {
- readlock lk("");
- ss << "time to get dblock: " << t.millis() << "ms\n";
- doLockedStuff(ss);
- }
- break;
+ {
+ Timer t;
+ readlocktry lk( "" , 2000 );
+ if ( lk.got() ){
+ ss << "time to get dblock: " << t.millis() << "ms\n";
+ doLockedStuff(ss);
}
- sleepmillis(1);
- if ( --n < 0 ) {
+ else {
ss << "\n<b>timed out getting dblock</b>\n";
- break;
}
}
+
ss << "</pre></body></html>";
responseMsg = ss.str();
@@ -323,6 +373,51 @@ namespace mongo {
}
}
+ void generateServerStatus( string url , string& responseMsg ){
+ static vector<string> commands;
+ if ( commands.size() == 0 ){
+ commands.push_back( "serverStatus" );
+ commands.push_back( "buildinfo" );
+ }
+
+ BSONObj params;
+ if ( url.find( "?" ) != string::npos ) {
+ parseParams( params , url.substr( url.find( "?" ) + 1 ) );
+ }
+
+ BSONObjBuilder buf(1024);
+
+ for ( unsigned i=0; i<commands.size(); i++ ){
+ string cmd = commands[i];
+
+ Command * c = Command::findCommand( cmd );
+ assert( c );
+ assert( c->locktype() == 0 );
+
+ BSONObj co;
+ {
+ BSONObjBuilder b;
+ b.append( cmd.c_str() , 1 );
+
+ if ( cmd == "serverStatus" && params["repl"].type() ){
+ b.append( "repl" , atoi( params["repl"].valuestr() ) );
+ }
+
+ co = b.obj();
+ }
+
+ string errmsg;
+
+ BSONObjBuilder sub;
+ if ( ! c->run( "admin.$cmd" , co , errmsg , sub , false ) )
+ buf.append( cmd.c_str() , errmsg );
+ else
+ buf.append( cmd.c_str() , sub.obj() );
+ }
+
+ responseMsg = buf.obj().jsonString();
+ }
+
void handleRESTRequest( const char *rq, // the full request
string url,
string& responseMsg,
@@ -341,7 +436,7 @@ namespace mongo {
string coll = url.substr( first + 1 );
string action = "";
- map<string,string> params;
+ BSONObj params;
if ( coll.find( "?" ) != string::npos ) {
parseParams( params , coll.substr( coll.find( "?" ) + 1 ) );
coll = coll.substr( 0 , coll.find( "?" ) );
@@ -361,7 +456,7 @@ namespace mongo {
if ( coll[i] == '/' )
coll[i] = '.';
- string fullns = dbname + "." + coll;
+ string fullns = urlDecode(dbname + "." + coll);
headers.push_back( (string)"x-action: " + action );
headers.push_back( (string)"x-ns: " + fullns );
@@ -387,26 +482,29 @@ namespace mongo {
responseMsg = ss.str();
}
- void handleRESTQuery( string ns , string action , map<string,string> & params , int & responseCode , stringstream & out ) {
+ void handleRESTQuery( string ns , string action , BSONObj & params , int & responseCode , stringstream & out ) {
Timer t;
int skip = _getOption( params["skip"] , 0 );
int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
int one = 0;
- if ( params["one"].size() > 0 && tolower( params["one"][0] ) == 't' ) {
+ if ( params["one"].type() == String && tolower( params["one"].valuestr()[0] ) == 't' ) {
num = 1;
one = 1;
}
BSONObjBuilder queryBuilder;
- for ( map<string,string>::iterator i = params.begin(); i != params.end(); i++ ) {
- if ( ! i->first.find( "filter_" ) == 0 )
+ BSONObjIterator i(params);
+ while ( i.more() ){
+ BSONElement e = i.next();
+ string name = e.fieldName();
+                if ( name.find( "filter_" ) != 0 )
continue;
- const char * field = i->first.substr( 7 ).c_str();
- const char * val = i->second.c_str();
+ const char * field = name.substr( 7 ).c_str();
+ const char * val = e.valuestr();
char * temp;
@@ -454,7 +552,7 @@ namespace mongo {
}
// TODO Generate id and revision per couch POST spec
- void handlePost( string ns, const char *body, map<string,string> & params, int & responseCode, stringstream & out ) {
+ void handlePost( string ns, const char *body, BSONObj& params, int & responseCode, stringstream & out ) {
try {
BSONObj obj = fromjson( body );
db.insert( ns.c_str(), obj );
@@ -468,10 +566,12 @@ namespace mongo {
out << "{ \"ok\" : true }";
}
- int _getOption( string val , int def ) {
- if ( val.size() == 0 )
- return def;
- return atoi( val.c_str() );
+ int _getOption( BSONElement e , int def ) {
+ if ( e.isNumber() )
+ return e.numberInt();
+ if ( e.type() == String )
+ return atoi( e.valuestr() );
+ return def;
}
private:
@@ -481,7 +581,6 @@ namespace mongo {
DBDirectClient DbWebServer::db;
void webServerThread() {
- boost::thread thr(statsThread);
Client::initThread("websvr");
DbWebServer mini;
int p = cmdLine.port + 1000;
diff --git a/db/storage.h b/db/diskloc.h
index cc29e60..cc29e60 100644
--- a/db/storage.h
+++ b/db/diskloc.h
diff --git a/db/driverHelpers.cpp b/db/driverHelpers.cpp
new file mode 100644
index 0000000..c2d1b9d
--- /dev/null
+++ b/db/driverHelpers.cpp
@@ -0,0 +1,63 @@
+// driverHelpers.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ this file has dbcommands that are for drivers
+ mostly helpers
+*/
+
+
+#include "stdafx.h"
+#include "jsobj.h"
+#include "pdfile.h"
+#include "namespace.h"
+#include "commands.h"
+#include "cmdline.h"
+#include "btree.h"
+#include "curop.h"
+#include "../util/background.h"
+#include "../scripting/engine.h"
+
+namespace mongo {
+
+ class BasicDriverHelper : public Command {
+ public:
+ BasicDriverHelper( const char * name ) : Command( name ){}
+
+ virtual LockType locktype(){ return NONE; }
+ virtual bool slaveOk(){ return true; }
+ virtual bool slaveOverrideOk(){ return true; }
+
+ };
+
+ class ObjectIdTest : public BasicDriverHelper {
+ public:
+ ObjectIdTest() : BasicDriverHelper( "driverOIDTest" ){}
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ if ( cmdObj.firstElement().type() != jstOID ){
+ errmsg = "not oid";
+ return false;
+ }
+
+ const OID& oid = cmdObj.firstElement().__oid();
+ result.append( "oid" , oid );
+ result.append( "str" , oid.str() );
+
+ return true;
+ }
+ } driverObjectIdTest;
+}
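A hypothetical in-server round trip through this helper (a sketch; appendOID and runCommand as declared in jsobj.h and dbclient.h):

    DBDirectClient db;
    OID oid;
    oid.init();                              // fresh ObjectId
    BSONObjBuilder cmd;
    cmd.appendOID( "driverOIDTest" , &oid );
    BSONObj out;
    if ( db.runCommand( "admin" , cmd.obj() , out ) ){
        // the command echoes the oid back along with its string form
        log() << "oid as string: " << out["str"].valuestr() << endl;
    }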
diff --git a/db/extsort.cpp b/db/extsort.cpp
index 08b343a..a0b9f7a 100644
--- a/db/extsort.cpp
+++ b/db/extsort.cpp
@@ -27,11 +27,12 @@
namespace mongo {
+ BSONObj BSONObjExternalSorter::extSortOrder;
unsigned long long BSONObjExternalSorter::_compares = 0;
BSONObjExternalSorter::BSONObjExternalSorter( const BSONObj & order , long maxFileSize )
: _order( order.getOwned() ) , _maxFilesize( maxFileSize ) ,
- _cur(0), _curSizeSoFar(0), _sorted(0){
+ _arraySize(1000000), _cur(0), _curSizeSoFar(0), _sorted(0){
stringstream rootpath;
rootpath << dbpath;
@@ -56,13 +57,21 @@ namespace mongo {
wassert( removed == 1 + _files.size() );
}
+ void BSONObjExternalSorter::_sortInMem(){
+        // extSortComp needs to use globals, since a qsort-style comparator gets no context argument;
+        // qsort_r only seems to be available on BSD, which is what we would really want to use
+ dblock l;
+ extSortOrder = _order;
+ _cur->sort( BSONObjExternalSorter::extSortComp );
+ }
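The global is needed because the comparator has a qsort-style signature, which carries no context. A C++ alternative that avoids both the global and the dblock is std::sort with a comparator object carrying the order; a standalone sketch with a string/int pair standing in for pair<BSONObj, DiskLoc>:

    #include <algorithm>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::pair<std::string, int> Data;   // stand-in for pair<BSONObj, DiskLoc>

    struct OrderCmp {                           // carries the sort spec; no global needed
        bool operator()(const Data& l, const Data& r) const {
            if (l.first != r.first)
                return l.first < r.first;       // stand-in for woCompare( r.first , order ) < 0
            return l.second < r.second;         // tie-break on disk location
        }
    };

    int main() {
        std::vector<Data> v;
        v.push_back(Data("b", 2));
        v.push_back(Data("a", 1));
        std::sort(v.begin(), v.end(), OrderCmp());
        return 0;
    }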
+
void BSONObjExternalSorter::sort(){
uassert( 10048 , "already sorted" , ! _sorted );
-
+
_sorted = true;
if ( _cur && _files.size() == 0 ){
- _cur->sort( MyCmp( _order ) );
+ _sortInMem();
log(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
return;
}
@@ -85,16 +94,20 @@ namespace mongo {
uassert( 10049 , "sorted already" , ! _sorted );
if ( ! _cur ){
- _cur = new InMemory();
+ _cur = new InMemory( _arraySize );
}
- _cur->push_back( pair<BSONObj,DiskLoc>( o.getOwned() , loc ) );
-
+ Data& d = _cur->getNext();
+ d.first = o.getOwned();
+ d.second = loc;
+
long size = o.objsize();
- _curSizeSoFar += size + sizeof( DiskLoc );
+ _curSizeSoFar += size + sizeof( DiskLoc ) + sizeof( BSONObj );
- if ( _curSizeSoFar > _maxFilesize )
+ if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ){
finishMap();
+ log(1) << "finishing map" << endl;
+ }
}
@@ -105,7 +118,7 @@ namespace mongo {
if ( _cur->size() == 0 )
return;
- _cur->sort( MyCmp( _order ) );
+ _sortInMem();
stringstream ss;
ss << _root.string() << "/file." << _files.size();
@@ -113,10 +126,10 @@ namespace mongo {
ofstream out;
out.open( file.c_str() , ios_base::out | ios_base::binary );
- uassert( 10051 , (string)"couldn't open file: " + file , out.good() );
+ ASSERT_STREAM_GOOD( 10051 , (string)"couldn't open file: " + file , out );
int num = 0;
- for ( InMemory::iterator i=_cur->begin(); i != _cur->end(); i++ ){
+ for ( InMemory::iterator i=_cur->begin(); i != _cur->end(); ++i ){
Data p = *i;
out.write( p.first.objdata() , p.first.objsize() );
out.write( (char*)(&p.second) , sizeof( DiskLoc ) );
@@ -169,10 +182,12 @@ namespace mongo {
return false;
}
- pair<BSONObj,DiskLoc> BSONObjExternalSorter::Iterator::next(){
+ BSONObjExternalSorter::Data BSONObjExternalSorter::Iterator::next(){
if ( _in ){
- return *(_it++);
+ Data& d = *_it;
+ ++_it;
+ return d;
}
Data best;
@@ -204,7 +219,7 @@ namespace mongo {
BSONObjExternalSorter::FileIterator::FileIterator( string file ){
long length;
- _buf = (char*)_file.map( file.c_str() , length );
+ _buf = (char*)_file.map( file.c_str() , length , MemoryMappedFile::SEQUENTIAL );
massert( 10308 , "mmap failed" , _buf );
assert( (unsigned long)length == file_size( file ) );
_end = _buf + length;
@@ -216,7 +231,7 @@ namespace mongo {
return _buf < _end;
}
- pair<BSONObj,DiskLoc> BSONObjExternalSorter::FileIterator::next(){
+ BSONObjExternalSorter::Data BSONObjExternalSorter::FileIterator::next(){
BSONObj o( _buf );
_buf += o.objsize();
DiskLoc * l = (DiskLoc*)_buf;
diff --git a/db/extsort.h b/db/extsort.h
index 5bfa86f..60ee423 100644
--- a/db/extsort.h
+++ b/db/extsort.h
@@ -22,9 +22,11 @@
#include "jsobj.h"
#include "namespace.h"
#include "curop.h"
+#include "../util/array.h"
namespace mongo {
+
/**
for sorting by BSONObj and attaching a value
*/
@@ -32,8 +34,21 @@ namespace mongo {
public:
typedef pair<BSONObj,DiskLoc> Data;
-
+
private:
+ static BSONObj extSortOrder;
+
+ static int extSortComp( const void *lv, const void *rv ){
+ RARELY killCurrentOp.checkForInterrupt();
+ _compares++;
+ Data * l = (Data*)lv;
+ Data * r = (Data*)rv;
+ int cmp = l->first.woCompare( r->first , extSortOrder );
+ if ( cmp )
+ return cmp;
+ return l->second.compare( r->second );
+ };
+
class FileIterator : boost::noncopyable {
public:
FileIterator( string file );
@@ -57,13 +72,14 @@ namespace mongo {
return x < 0;
return l.second.compare( r.second ) < 0;
};
+
private:
BSONObj _order;
};
-
- public:
- typedef list<Data> InMemory;
+ public:
+
+ typedef FastArray<Data> InMemory;
class Iterator : boost::noncopyable {
public:
@@ -102,8 +118,17 @@ namespace mongo {
int numFiles(){
return _files.size();
}
+
+ long getCurSizeSoFar(){ return _curSizeSoFar; }
+
+ void hintNumObjects( long long numObjects ){
+ if ( numObjects < _arraySize )
+ _arraySize = (int)(numObjects + 100);
+ }
private:
+
+ void _sortInMem();
void sort( string file );
void finishMap();
@@ -112,6 +137,7 @@ namespace mongo {
long _maxFilesize;
path _root;
+ int _arraySize;
InMemory * _cur;
long _curSizeSoFar;
diff --git a/db/flushtest.cpp b/db/flushtest.cpp
index a301e0e..00cebcf 100644
--- a/db/flushtest.cpp
+++ b/db/flushtest.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#include "stdafx.h"
#include <stdio.h>
#include "../util/goodies.h"
diff --git a/db/index.cpp b/db/index.cpp
index fab6918..5ec2658 100644
--- a/db/index.cpp
+++ b/db/index.cpp
@@ -21,22 +21,80 @@
#include "index.h"
#include "btree.h"
#include "query.h"
+#include "background.h"
namespace mongo {
+ map<string,IndexPlugin*> * IndexPlugin::_plugins;
+
+ IndexType::IndexType( const IndexPlugin * plugin , const IndexSpec * spec )
+ : _plugin( plugin ) , _spec( spec ){
+
+ }
+
+ IndexType::~IndexType(){
+ }
+
+ const BSONObj& IndexType::keyPattern() const {
+ return _spec->keyPattern;
+ }
+
+ IndexPlugin::IndexPlugin( const string& name )
+ : _name( name ){
+ if ( ! _plugins )
+ _plugins = new map<string,IndexPlugin*>();
+ (*_plugins)[name] = this;
+ }
+
+ int IndexType::compare( const BSONObj& l , const BSONObj& r ) const {
+ return l.woCompare( r , _spec->keyPattern );
+ }
+
+
+ int removeFromSysIndexes(const char *ns, const char *idxName) {
+ string system_indexes = cc().database()->name + ".system.indexes";
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ b.append("name", idxName); // e.g.: { name: "ts_1", ns: "foo.coll" }
+ BSONObj cond = b.done();
+ return (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
+ }
+
+    /* this is just an attempt to clean up old orphaned entries on a drop-all-indexes
+       call. repairDatabase is the clean solution, but this gives a lighter-weight
+       partial option. see dropIndexes()
+ */
+ void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
+ string system_indexes = cc().database()->name + ".system.indexes";
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ if( idIndex ) {
+ b.append("name", BSON( "$ne" << idIndex->indexName().c_str() ));
+ }
+ BSONObj cond = b.done();
+ int n = (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
+ if( n ) {
+ log() << "info: assureSysIndexesEmptied cleaned up " << n << " entries" << endl;
+ }
+ }
+
+ const IndexSpec& IndexDetails::getSpec() const {
+ scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ return NamespaceDetailsTransient::get_inlock( info.obj()["ns"].valuestr() ).getIndexSpec( this );
+ }
+
/* delete this index. does NOT clean up the system catalog
(system.indexes or system.namespaces) -- only NamespaceIndex.
*/
void IndexDetails::kill_idx() {
string ns = indexNamespace(); // e.g. foo.coll.$ts_1
+
+ string pns = parentNS(); // note we need a copy, as parentNS() won't work after the drop() below
// clean up parent namespace index cache
- NamespaceDetailsTransient::get_w( parentNS().c_str() ).deletedIndex();
+ NamespaceDetailsTransient::get_w( pns.c_str() ).deletedIndex();
- BSONObjBuilder b;
- b.append("name", indexName().c_str());
- b.append("ns", parentNS().c_str());
- BSONObj cond = b.done(); // e.g.: { name: "ts_1", ns: "foo.coll" }
+ string name = indexName();
/* important to catch exception here so we can finish cleanup below. */
try {
@@ -48,22 +106,44 @@ namespace mongo {
head.setInvalid();
info.setInvalid();
- // clean up in system.indexes. we do this last on purpose. note we have
- // to make the cond object before the drop() above though.
- string system_indexes = cc().database()->name + ".system.indexes";
- int n = deleteObjects(system_indexes.c_str(), cond, false, false, true);
+ // clean up in system.indexes. we do this last on purpose.
+ int n = removeFromSysIndexes(pns.c_str(), name.c_str());
wassert( n == 1 );
}
+
+ void IndexSpec::reset( const IndexDetails * details ){
+ _details = details;
+ reset( details->info );
+ }
+
+ void IndexSpec::reset( const DiskLoc& loc ){
+ info = loc.obj();
+ keyPattern = info["key"].embeddedObjectUserCheck();
+ if ( keyPattern.objsize() == 0 ) {
+ out() << info.toString() << endl;
+ assert(false);
+ }
+ _init();
+ }
+
void IndexSpec::_init(){
- assert( keys.objsize() );
+ assert( keyPattern.objsize() );
- BSONObjIterator i( keys );
+ string pluginName = "";
+
+ BSONObjIterator i( keyPattern );
BSONObjBuilder nullKeyB;
while( i.more() ) {
- _fieldNames.push_back( i.next().fieldName() );
+ BSONElement e = i.next();
+ _fieldNames.push_back( e.fieldName() );
_fixed.push_back( BSONElement() );
nullKeyB.appendNull( "" );
+ if ( e.type() == String ){
+ uassert( 13007 , "can only have 1 index plugin / bad index key pattern" , pluginName.size() == 0 );
+ pluginName = e.valuestr();
+ }
+
}
_nullKey = nullKeyB.obj();
@@ -72,10 +152,25 @@ namespace mongo {
b.appendNull( "" );
_nullObj = b.obj();
_nullElt = _nullObj.firstElement();
+
+ if ( pluginName.size() ){
+ IndexPlugin * plugin = IndexPlugin::get( pluginName );
+ if ( ! plugin ){
+ log() << "warning: can't find plugin [" << pluginName << "]" << endl;
+ }
+ else {
+ _indexType.reset( plugin->generate( this ) );
+ }
+ }
+ _finishedInit = true;
}
-
+
void IndexSpec::getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
+ if ( _indexType.get() ){
+ _indexType->getKeys( obj , keys );
+ return;
+ }
vector<const char*> fieldNames( _fieldNames );
vector<BSONElement> fixed( _fixed );
_getKeys( fieldNames , fixed , obj, keys );
@@ -115,7 +210,7 @@ namespace mongo {
if ( allFound ) {
if ( arrElt.eoo() ) {
// no terminal array element to expand
- BSONObjBuilder b;
+ BSONObjBuilder b(_sizeTracker);
for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
b.appendAs( *i, "" );
keys.insert( b.obj() );
@@ -125,7 +220,7 @@ namespace mongo {
BSONObjIterator i( arrElt.embeddedObject() );
if ( i.more() ){
while( i.more() ) {
- BSONObjBuilder b;
+ BSONObjBuilder b(_sizeTracker);
for( unsigned j = 0; j < fixed.size(); ++j ) {
if ( j == arrIdx )
b.appendAs( i.next(), "" );
@@ -137,7 +232,7 @@ namespace mongo {
}
else if ( fixed.size() > 1 ){
// x : [] - need to insert undefined
- BSONObjBuilder b;
+ BSONObjBuilder b(_sizeTracker);
for( unsigned j = 0; j < fixed.size(); ++j ) {
if ( j == arrIdx )
b.appendUndefined( "" );
@@ -165,7 +260,7 @@ namespace mongo {
Keys will be left empty if key not found in the object.
*/
void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const {
- NamespaceDetailsTransient::get_w( info.obj()["ns"].valuestr() ).getIndexSpec( this ).getKeys( obj, keys );
+ getSpec().getKeys( obj, keys );
}
void setDifference(BSONObjSetDefaultOrder &l, BSONObjSetDefaultOrder &r, vector<BSONObj*> &diff) {
@@ -185,27 +280,27 @@ namespace mongo {
}
void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj) {
- v.resize(d.nIndexes);
+ int z = d.nIndexesBeingBuilt();
+ v.resize(z);
NamespaceDetails::IndexIterator i = d.ii();
- while( i.more() ) {
- int j = i.pos();
- IndexDetails& idx = i.next();
+ for( int i = 0; i < z; i++ ) {
+ IndexDetails& idx = d.idx(i);
BSONObj idxKey = idx.info.obj().getObjectField("key"); // eg { ts : 1 }
- IndexChanges& ch = v[j];
+ IndexChanges& ch = v[i];
idx.getKeysFromObject(oldObj, ch.oldkeys);
idx.getKeysFromObject(newObj, ch.newkeys);
if( ch.newkeys.size() > 1 )
- d.setIndexIsMultikey(j);
+ d.setIndexIsMultikey(i);
setDifference(ch.oldkeys, ch.newkeys, ch.removed);
setDifference(ch.newkeys, ch.oldkeys, ch.added);
}
}
- void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d) {
- NamespaceDetails::IndexIterator i = d.ii();
- while( i.more() ) {
- int j = i.pos();
- v[j].dupCheck(i.next());
+ void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d, DiskLoc curObjLoc) {
+ int z = d.nIndexesBeingBuilt();
+ for( int i = 0; i < z; i++ ) {
+ IndexDetails& idx = d.idx(i);
+ v[i].dupCheck(idx, curObjLoc);
}
}
@@ -248,6 +343,12 @@ namespace mongo {
uassert(10097, "bad table to index name on add index attempt",
cc().database()->name == nsToDatabase(sourceNS.c_str()));
+ /* we can't build a new index for the ns if a build is already in progress in the background -
+ EVEN IF this is a foreground build.
+ */
+ uassert(12588, "cannot add index with a background operation in progress",
+ !BackgroundOperation::inProgForNs(sourceNS.c_str()));
+
BSONObj key = io.getObjectField("key");
uassert(12524, "index key pattern too large", key.objsize() <= 2048);
if( !validKeyPattern(key) ) {
@@ -303,4 +404,40 @@ namespace mongo {
return true;
}
+ bool anyElementNamesMatch( const BSONObj& a , const BSONObj& b ){
+ BSONObjIterator x(a);
+ while ( x.more() ){
+ BSONElement e = x.next();
+ BSONObjIterator y(b);
+ while ( y.more() ){
+ BSONElement f = y.next();
+ FieldCompareResult res = compareDottedFieldNames( e.fieldName() , f.fieldName() );
+ if ( res == SAME || res == LEFT_SUBFIELD || res == RIGHT_SUBFIELD )
+ return true;
+ }
+ }
+ return false;
+ }
+
+ IndexSuitability IndexSpec::suitability( const BSONObj& query , const BSONObj& order ) const {
+ if ( _indexType.get() )
+ return _indexType->suitability( query , order );
+ return _suitability( query , order );
+ }
+
+ IndexSuitability IndexSpec::_suitability( const BSONObj& query , const BSONObj& order ) const {
+ // TODO: optimize
+ if ( anyElementNamesMatch( keyPattern , query ) == 0 && anyElementNamesMatch( keyPattern , order ) == 0 )
+ return USELESS;
+ return HELPFUL;
+ }
+
+ IndexSuitability IndexType::suitability( const BSONObj& query , const BSONObj& order ) const {
+ return _spec->_suitability( query , order );
+ }
+
+ bool IndexType::scanAndOrderRequired( const BSONObj& query , const BSONObj& order ) const {
+ return ! order.isEmpty();
+ }
+
}
diff --git a/db/index.h b/db/index.h
index 696e84d..6965f11 100644
--- a/db/index.h
+++ b/db/index.h
@@ -19,46 +19,136 @@
#pragma once
#include "../stdafx.h"
+#include "diskloc.h"
+#include "jsobj.h"
+#include <map>
namespace mongo {
+
+ class IndexSpec;
+ class IndexType; // TODO: this name sucks
+ class IndexPlugin;
+ class IndexDetails;
+
+ enum IndexSuitability { USELESS = 0 , HELPFUL = 1 , OPTIMAL = 2 };
+
+ /**
+     * this represents an instance of an index plugin
+     * done this way so parsing, etc. can be cached
+     * so if there is an FTS IndexPlugin, each index using FTS
+     * gets 1 of these, and it can hold things pre-parsed, pre-computed, etc.
+ */
+ class IndexType : boost::noncopyable {
+ public:
+ IndexType( const IndexPlugin * plugin , const IndexSpec * spec );
+ virtual ~IndexType();
+
+ virtual void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const = 0;
+ virtual auto_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const = 0;
+
+ /** optional op : changes query to match what's in the index */
+ virtual BSONObj fixKey( const BSONObj& in ) { return in; }
+
+ /** optional op : compare 2 objects with regards to this index */
+ virtual int compare( const BSONObj& l , const BSONObj& r ) const;
+
+ /** @return plugin */
+ const IndexPlugin * getPlugin() const { return _plugin; }
+
+ const BSONObj& keyPattern() const;
+
+ virtual IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const ;
+
+ virtual bool scanAndOrderRequired( const BSONObj& query , const BSONObj& order ) const ;
+
+ protected:
+ const IndexPlugin * _plugin;
+ const IndexSpec * _spec;
+ };
+ /**
+ * this represents a plugin
+ * a plugin could be something like full text search, sparse index, etc...
+ * 1 of these exists per type of index per server
+ * 1 IndexType is created per index using this plugin
+ */
+ class IndexPlugin : boost::noncopyable {
+ public:
+ IndexPlugin( const string& name );
+ virtual ~IndexPlugin(){}
+
+ virtual IndexType* generate( const IndexSpec * spec ) const = 0;
+
+ static IndexPlugin* get( const string& name ){
+ if ( ! _plugins )
+ return 0;
+ map<string,IndexPlugin*>::iterator i = _plugins->find( name );
+ if ( i == _plugins->end() )
+ return 0;
+ return i->second;
+ }
+
+ string getName() const { return _name; }
+ private:
+ string _name;
+ static map<string,IndexPlugin*> * _plugins;
+ };
+
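To make the registration flow concrete, a hedged sketch of a trivial plugin in the style above ("alwaysone" is a hypothetical type name; the real worked example is the "2d" plugin in index_geo2d.cpp below):

    class AlwaysOneType : public IndexType {
    public:
        AlwaysOneType( const IndexPlugin * p , const IndexSpec * s ) : IndexType( p , s ){}
        virtual void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
            keys.insert( BSON( "" << 1 ) );     // every document indexes under the single key 1
        }
        virtual auto_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order ,
                                            int numWanted ) const {
            assert( false );                    // query side omitted in this sketch
            return auto_ptr<Cursor>();
        }
    };

    class AlwaysOnePlugin : public IndexPlugin {
    public:
        AlwaysOnePlugin() : IndexPlugin( "alwaysone" ){}   // base ctor self-registers by name
        virtual IndexType* generate( const IndexSpec * spec ) const {
            return new AlwaysOneType( this , spec );
        }
    } alwaysOnePlugin;                          // static instance: in _plugins at startup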
+ /* precomputed details about an index, used for inserting keys on updates
+ stored/cached in NamespaceDetailsTransient, or can be used standalone
+ */
class IndexSpec {
public:
- BSONObj keys;
- BSONObj meta;
+ BSONObj keyPattern; // e.g., { name : 1 }
+ BSONObj info; // this is the same as IndexDetails::info.obj()
- IndexSpec(){
+ IndexSpec()
+ : _details(0) , _finishedInit(false){
}
IndexSpec( const BSONObj& k , const BSONObj& m = BSONObj() )
- : keys(k) , meta(m){
+ : keyPattern(k) , info(m) , _details(0) , _finishedInit(false){
_init();
}
-
+
/**
- this is a DickLock of an IndexDetails info
+           this is a DiskLoc of an IndexDetails info
should have a key field
*/
IndexSpec( const DiskLoc& loc ){
reset( loc );
}
- void reset( const DiskLoc& loc ){
- meta = loc.obj();
- keys = meta["key"].embeddedObjectUserCheck();
- if ( keys.objsize() == 0 ) {
- out() << meta.toString() << endl;
- assert(false);
-
- }
- _init();
- }
+ void reset( const DiskLoc& loc );
+ void reset( const IndexDetails * details );
void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const;
- private:
+ BSONElement missingField() const { return _nullElt; }
+
+ string getTypeName() const {
+ if ( _indexType.get() )
+ return _indexType->getPlugin()->getName();
+ return "";
+ }
+
+ IndexType* getType() const {
+ return _indexType.get();
+ }
+
+ const IndexDetails * getDetails() const {
+ return _details;
+ }
+
+ IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const ;
+
+ protected:
+
+ IndexSuitability _suitability( const BSONObj& query , const BSONObj& order ) const ;
void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const;
+
+ BSONSizeTracker _sizeTracker;
vector<const char*> _fieldNames;
vector<BSONElement> _fixed;
@@ -67,14 +157,23 @@ namespace mongo {
BSONObj _nullObj;
BSONElement _nullElt;
+ shared_ptr<IndexType> _indexType;
+
+ const IndexDetails * _details;
+
void _init();
+
+ public:
+ bool _finishedInit;
+
+ friend class IndexType;
};
/* Details about a particular index. There is one of these effectively for each object in
system.namespaces (although this also includes the head pointer, which is not in that
collection).
- ** MemoryMapped Record **
+ ** MemoryMapped Record ** (i.e., this is on disk data)
*/
class IndexDetails {
public:
@@ -117,6 +216,7 @@ namespace mongo {
/* true if the specified key is in the index */
bool hasKey(const BSONObj& key);
+ bool wouldCreateDup(const BSONObj& key, DiskLoc self);
// returns name of this index's storage area
// database.table.$index
@@ -172,6 +272,8 @@ namespace mongo {
(system.indexes or system.namespaces) -- only NamespaceIndex.
*/
void kill_idx();
+
+ const IndexSpec& getSpec() const;
operator string() const {
return info.obj().toString();
@@ -184,15 +286,20 @@ namespace mongo {
vector<BSONObj*> removed; // these keys were removed as part of the change
vector<BSONObj*> added; // these keys were added as part of the change
- void dupCheck(IndexDetails& idx) {
+ /** @curObjLoc - the object we want to add's location. if it is already in the
+ index, that is allowed here (for bg indexing case).
+ */
+ void dupCheck(IndexDetails& idx, DiskLoc curObjLoc) {
if( added.empty() || !idx.unique() )
return;
- for( vector<BSONObj*>::iterator i = added.begin(); i != added.end(); i++ )
- uassert( 11001 , "E11001 duplicate key on update", !idx.hasKey(**i));
+ for( vector<BSONObj*>::iterator i = added.begin(); i != added.end(); i++ ) {
+ bool dup = idx.wouldCreateDup(**i, curObjLoc);
+ uassert( 11001 , "E11001 duplicate key on update", !dup);
+ }
}
};
class NamespaceDetails;
void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj);
- void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d);
+ void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d, DiskLoc curObjLoc);
} // namespace mongo
diff --git a/db/index_geo2d.cpp b/db/index_geo2d.cpp
new file mode 100644
index 0000000..4730c29
--- /dev/null
+++ b/db/index_geo2d.cpp
@@ -0,0 +1,1675 @@
+// geo2d.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "stdafx.h"
+#include "namespace.h"
+#include "jsobj.h"
+#include "index.h"
+#include "../util/unittest.h"
+#include "commands.h"
+#include "pdfile.h"
+#include "btree.h"
+#include "curop.h"
+#include "matcher.h"
+
+//#define GEODEBUG(x) cout << x << endl;
+#define GEODEBUG(x)
+
+namespace mongo {
+
+ const string GEO2DNAME = "2d";
+
+ class GeoBitSets {
+ public:
+ GeoBitSets(){
+ for ( int i=0; i<32; i++ ){
+ masks32[i] = ( 1 << ( 31 - i ) );
+ }
+ for ( int i=0; i<64; i++ ){
+ masks64[i] = ( 1LL << ( 63 - i ) );
+ }
+ }
+ int masks32[32];
+ long long masks64[64];
+ } geoBitSets;
+
+
+ class GeoHash {
+ public:
+ GeoHash()
+ : _hash(0),_bits(0){
+ }
+
+ GeoHash( const char * hash ){
+ init( hash );
+ }
+
+ GeoHash( const string& hash ){
+ init( hash );
+ }
+
+ GeoHash( const BSONElement& e , unsigned bits=32 ){
+ _bits = bits;
+ if ( e.type() == BinData ){
+ int len = 0;
+ _copy( (char*)&_hash , e.binData( len ) );
+ assert( len == 8 );
+ _bits = bits;
+ }
+ else {
+ cout << "GeoHash cons e : " << e << endl;
+ uassert(13047,"wrong type for geo index. if you're using a pre-release version, need to rebuild index",0);
+ }
+ _fix();
+ }
+
+ GeoHash( unsigned x , unsigned y , unsigned bits=32){
+ init( x , y , bits );
+ }
+
+ GeoHash( const GeoHash& old ){
+ _hash = old._hash;
+ _bits = old._bits;
+ }
+
+ GeoHash( long long hash , unsigned bits )
+ : _hash( hash ) , _bits( bits ){
+ _fix();
+ }
+
+ void init( unsigned x , unsigned y , unsigned bits ){
+ assert( bits <= 32 );
+ _hash = 0;
+ _bits = bits;
+ for ( unsigned i=0; i<bits; i++ ){
+ if ( isBitSet( x , i ) ) _hash |= geoBitSets.masks64[i*2];
+ if ( isBitSet( y , i ) ) _hash |= geoBitSets.masks64[(i*2)+1];
+ }
+ }
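+ // Illustrative note (not part of the original patch): init() interleaves
+ // the top `bits` bits of x and y, x first. With bits=3, an x whose top
+ // bits are 1,0,1 and a y whose top bits are 0,1,1 hash to x0 y0 x1 y1 x2 y2,
+ // i.e. toString() == "100111".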
+
+ void unhash( unsigned& x , unsigned& y ) const {
+ x = 0;
+ y = 0;
+ for ( unsigned i=0; i<_bits; i++ ){
+ if ( getBitX(i) )
+ x |= geoBitSets.masks32[i];
+ if ( getBitY(i) )
+ y |= geoBitSets.masks32[i];
+ }
+ }
+
+ /**
+ * @param bit bit position within val; 0 is the high-order bit
+ */
+ static bool isBitSet( unsigned val , unsigned bit ){
+ return geoBitSets.masks32[bit] & val;
+ }
+
+ GeoHash up() const {
+ return GeoHash( _hash , _bits - 1 );
+ }
+
+ bool hasPrefix( const GeoHash& other ) const {
+ assert( other._bits <= _bits );
+ if ( other._bits == 0 )
+ return true;
+ long long x = other._hash ^ _hash;
+ x = x >> (64-(other._bits*2));
+ return x == 0;
+ }
+
+
+ string toString() const {
+ StringBuilder buf( _bits * 2 );
+ for ( unsigned x=0; x<_bits*2; x++ )
+ buf.append( _hash & geoBitSets.masks64[x] ? "1" : "0" );
+ return buf.str();
+ }
+
+ string toStringHex1() const {
+ stringstream ss;
+ ss << hex << _hash;
+ return ss.str();
+ }
+
+ void init( const string& s ){
+ _hash = 0;
+ _bits = s.size() / 2;
+ for ( unsigned pos=0; pos<s.size(); pos++ )
+ if ( s[pos] == '1' )
+ setBit( pos , 1 );
+ }
+
+ void setBit( unsigned pos , bool one ){
+ assert( pos < _bits * 2 );
+ if ( one )
+ _hash |= geoBitSets.masks64[pos];
+ else if ( _hash & geoBitSets.masks64[pos] )
+ _hash &= ~geoBitSets.masks64[pos];
+ }
+
+ bool getBit( unsigned pos ) const {
+ return _hash & geoBitSets.masks64[pos];
+ }
+
+ bool getBitX( unsigned pos ) const {
+ assert( pos < 32 );
+ return getBit( pos * 2 );
+ }
+
+ bool getBitY( unsigned pos ) const {
+ assert( pos < 32 );
+ return getBit( ( pos * 2 ) + 1 );
+ }
+
+ BSONObj wrap() const {
+ BSONObjBuilder b(20);
+ append( b , "" );
+ BSONObj o = b.obj();
+ assert( o.objsize() == 20 );
+ return o;
+ }
+
+ bool constrains() const {
+ return _bits > 0;
+ }
+
+ void move( int x , int y ){
+ assert( _bits );
+ _move( 0 , x );
+ _move( 1 , y );
+ }
+
+ void _move( unsigned offset , int d ){
+ if ( d == 0 )
+ return;
+ assert( d <= 1 && d >= -1 ); // TEMP
+
+ bool from, to;
+ if ( d > 0 ){
+ from = 0;
+ to = 1;
+ }
+ else {
+ from = 1;
+ to = 0;
+ }
+
+ unsigned pos = ( _bits * 2 ) - 1;
+ if ( offset == 0 )
+ pos--;
+ while ( true ){
+ if ( getBit(pos) == from ){
+ setBit( pos , to );
+ return;
+ }
+
+ if ( pos < 2 ){
+ // overflow
+ for ( ; pos < ( _bits * 2 ) ; pos += 2 ){
+ setBit( pos , from );
+ }
+ return;
+ }
+
+ setBit( pos , from );
+ pos -= 2;
+ }
+
+ assert(0);
+ }
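+ // Illustrative note (not part of the original patch): _move() is an
+ // increment/decrement with carry over one axis' interleaved bits; e.g.
+ // moving y by +1 takes "0001" to "0100" (the carry ripples from the low
+ // y bit up to the next y bit), as exercised by GeoUnitTest below.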
+
+ GeoHash& operator=(const GeoHash& h) {
+ _hash = h._hash;
+ _bits = h._bits;
+ return *this;
+ }
+
+ bool operator==(const GeoHash& h ){
+ return _hash == h._hash && _bits == h._bits;
+ }
+
+ GeoHash& operator+=( const char * s ) {
+ unsigned pos = _bits * 2;
+ _bits += strlen(s) / 2;
+ assert( _bits <= 32 );
+ while ( s[0] ){
+ if ( s[0] == '1' )
+ setBit( pos , 1 );
+ pos++;
+ s++;
+ }
+
+ return *this;
+ }
+
+ GeoHash operator+( const char * s ) const {
+ GeoHash n = *this;
+ n+=s;
+ return n;
+ }
+
+ void _fix(){
+ if ( ( _hash << ( _bits * 2 ) ) == 0 )
+ return;
+ long long mask = 0;
+ for ( unsigned i=0; i<_bits*2; i++ )
+ mask |= geoBitSets.masks64[i];
+ _hash &= mask;
+ }
+
+ void append( BSONObjBuilder& b , const char * name ) const {
+ char buf[8];
+ _copy( buf , (char*)&_hash );
+ b.appendBinData( name , 8 , bdtCustom , buf );
+ }
+
+ long long getHash() const {
+ return _hash;
+ }
+
+ GeoHash commonPrefix( const GeoHash& other ) const {
+ unsigned i=0;
+ for ( ; i<_bits && i<other._bits; i++ ){
+ if ( getBitX( i ) == other.getBitX( i ) &&
+ getBitY( i ) == other.getBitY( i ) )
+ continue;
+ break;
+ }
+ return GeoHash(_hash,i);
+ }
+ private:
+
+ void _copy( char * dst , const char * src ) const {
+ for ( unsigned a=0; a<8; a++ ){
+ dst[a] = src[7-a];
+ }
+ }
+
+ long long _hash;
+ unsigned _bits; // bits per field, so 1 to 32
+ };
+
+ ostream& operator<<( ostream &s, const GeoHash &h ){
+ s << h.toString();
+ return s;
+ } // end GeoHash
+
+ class Geo2dType : public IndexType {
+ public:
+ Geo2dType( const IndexPlugin * plugin , const IndexSpec* spec )
+ : IndexType( plugin , spec ){
+
+ BSONObjBuilder orderBuilder;
+
+ BSONObjIterator i( spec->keyPattern );
+ while ( i.more() ){
+ BSONElement e = i.next();
+ if ( e.type() == String && GEO2DNAME == e.valuestr() ){
+ uassert( 13022 , "can't have 2 geo fields" , _geo.size() == 0 );
+ uassert( 13023 , "2d has to be first in index" , _other.size() == 0 );
+ _geo = e.fieldName();
+ }
+ else {
+ _other.push_back( e.fieldName() );
+ }
+ orderBuilder.append( "" , 1 );
+ }
+
+ uassert( 13024 , "no geo field specified" , _geo.size() );
+
+ _bits = _configval( spec , "bits" , 26 ); // for lat/long, ~ 1ft
+
+ uassert( 13028 , "can't have more than 32 bits in geo index" , _bits <= 32 );
+
+ _max = _configval( spec , "max" , 180 );
+ _min = _configval( spec , "min" , -180 );
+
+ _scaling = (1024*1024*1024*4.0)/(_max-_min);
+
+ _order = orderBuilder.obj();
+ }
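+ // Worked example (illustrative, not in the original patch): the constant
+ // 1024*1024*1024*4.0 is 2^32, so with the default min=-180 and max=180,
+ // _scaling = 2^32 / 360 ~= 11930464.7 grid steps per degree; _convert()
+ // below uses it to map [min,max] onto the full unsigned 32-bit range.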
+
+ int _configval( const IndexSpec* spec , const string& name , int def ){
+ BSONElement e = spec->info[name];
+ if ( e.isNumber() )
+ return e.numberInt();
+ return def;
+ }
+
+ ~Geo2dType(){
+
+ }
+
+ virtual BSONObj fixKey( const BSONObj& in ) {
+ if ( in.firstElement().type() == BinData )
+ return in;
+
+ BSONObjBuilder b(in.objsize()+16);
+
+ if ( in.firstElement().isABSONObj() )
+ _hash( in.firstElement().embeddedObject() ).append( b , "" );
+ else if ( in.firstElement().type() == String )
+ GeoHash( in.firstElement().valuestr() ).append( b , "" );
+ else if ( in.firstElement().type() == RegEx )
+ GeoHash( in.firstElement().regex() ).append( b , "" );
+ else
+ return in;
+
+ BSONObjIterator i(in);
+ i.next();
+ while ( i.more() )
+ b.append( i.next() );
+ return b.obj();
+ }
+
+ virtual void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
+ BSONElement geo = obj.getFieldDotted(_geo.c_str());
+ if ( geo.eoo() )
+ return;
+
+ BSONObjBuilder b(64);
+
+ if ( ! geo.isABSONObj() )
+ return;
+
+ BSONObj embed = geo.embeddedObject();
+ if ( embed.isEmpty() )
+ return;
+
+ _hash( embed ).append( b , "" );
+
+ for ( size_t i=0; i<_other.size(); i++ ){
+ BSONElement e = obj[_other[i]];
+ if ( e.eoo() )
+ e = _spec->missingField();
+ b.appendAs( e , "" );
+ }
+ keys.insert( b.obj() );
+ }
+
+ GeoHash _tohash( const BSONElement& e ) const {
+ if ( e.isABSONObj() )
+ return _hash( e.embeddedObject() );
+
+ return GeoHash( e , _bits );
+ }
+
+ GeoHash _hash( const BSONObj& o ) const {
+ BSONObjIterator i(o);
+ uassert( 13067 , "geo field is empty" , i.more() );
+ BSONElement x = i.next();
+ uassert( 13068 , "geo field only has 1 element" , i.more() );
+ BSONElement y = i.next();
+
+ uassert( 13026 , "geo values have to be numbers" , x.isNumber() && y.isNumber() );
+
+ return _hash( x.number() , y.number() );
+ }
+
+ GeoHash _hash( double x , double y ) const {
+ return GeoHash( _convert(x), _convert(y) , _bits );
+ }
+
+ BSONObj _unhash( const GeoHash& h ) const {
+ unsigned x , y;
+ h.unhash( x , y );
+ BSONObjBuilder b;
+ b.append( "x" , _unconvert( x ) );
+ b.append( "y" , _unconvert( y ) );
+ return b.obj();
+ }
+
+ unsigned _convert( double in ) const {
+ uassert( 13027 , "point not in range" , in <= _max && in >= _min );
+ in -= _min;
+ assert( in >= 0 ); // in == _min is allowed by the range check above
+ return (unsigned)(in * _scaling);
+ }
+
+ double _unconvert( unsigned in ) const {
+ double x = in;
+ x /= _scaling;
+ x += _min;
+ return x;
+ }
+
+ void _unconvert( const GeoHash& h , double& x , double& y ) const {
+ unsigned a,b;
+ h.unhash(a,b);
+ x = _unconvert( a );
+ y = _unconvert( b );
+ }
+
+ double distance( const GeoHash& a , const GeoHash& b ) const {
+ double ax,ay,bx,by;
+ _unconvert( a , ax , ay );
+ _unconvert( b , bx , by );
+
+ double dx = bx - ax;
+ double dy = by - ay;
+
+ return sqrt( ( dx * dx ) + ( dy * dy ) );
+ }
+
+ double size( const GeoHash& a ) const {
+ GeoHash b = a;
+ b.move( 1 , 1 );
+ return distance( a , b );
+ }
+
+ const IndexDetails* getDetails() const {
+ return _spec->getDetails();
+ }
+
+ virtual auto_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const;
+
+ virtual IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const {
+ BSONElement e = query.getFieldDotted(_geo.c_str());
+ switch ( e.type() ){
+ case Object: {
+ BSONObj sub = e.embeddedObject();
+ switch ( sub.firstElement().getGtLtOp() ){
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ return OPTIMAL;
+ default:;
+ }
+ }
+ case Array:
+ return HELPFUL;
+ default:
+ return USELESS;
+ }
+ }
+
+ string _geo;
+ vector<string> _other;
+
+ unsigned _bits;
+ int _max;
+ int _min;
+ double _scaling;
+
+ BSONObj _order;
+ };
+
+ class Point {
+ public:
+
+ Point( const Geo2dType * g , const GeoHash& hash ){
+ g->_unconvert( hash , _x , _y );
+ }
+
+ Point( double x , double y )
+ : _x( x ) , _y( y ){
+ }
+
+ Point() : _x(0),_y(0){
+ }
+
+ GeoHash hash( const Geo2dType * g ){
+ return g->_hash( _x , _y );
+ }
+
+ string toString() const {
+ StringBuilder buf(32);
+ buf << "(" << _x << "," << _y << ")";
+ return buf.str();
+
+ }
+
+ double _x;
+ double _y;
+ };
+
+ class Box {
+ public:
+
+ Box( const Geo2dType * g , const GeoHash& hash )
+ : _min( g , hash ) ,
+ _max( _min._x + g->size( hash ) , _min._y + g->size( hash ) ){
+ }
+
+ Box( double x , double y , double size )
+ : _min( x , y ) ,
+ _max( x + size , y + size ){
+ }
+
+ Box( Point min , Point max )
+ : _min( min ) , _max( max ){
+ }
+
+ Box(){}
+
+ string toString() const {
+ StringBuilder buf(64);
+ buf << _min.toString() << " -->> " << _max.toString();
+ return buf.str();
+ }
+
+ operator string() const {
+ return toString();
+ }
+
+ bool between( double min , double max , double val , double fudge=0) const {
+ return val + fudge >= min && val <= max + fudge;
+ }
+
+ bool mid( double amin , double amax , double bmin , double bmax , bool min , double& res ) const {
+ assert( amin < amax );
+ assert( bmin < bmax );
+
+ if ( amin < bmin ){
+ if ( amax < bmin )
+ return false;
+ res = min ? bmin : amax;
+ return true;
+ }
+ if ( amin > bmax )
+ return false;
+ res = min ? amin : bmax;
+ return true;
+ }
+
+ double intersects( const Box& other ) const {
+
+ Point boundMin(0,0);
+ Point boundMax(0,0);
+
+ if ( mid( _min._x , _max._x , other._min._x , other._max._x , true , boundMin._x ) == false ||
+ mid( _min._x , _max._x , other._min._x , other._max._x , false , boundMax._x ) == false ||
+ mid( _min._y , _max._y , other._min._y , other._max._y , true , boundMin._y ) == false ||
+ mid( _min._y , _max._y , other._min._y , other._max._y , false , boundMax._y ) == false )
+ return 0;
+
+ Box intersection( boundMin , boundMax );
+
+ return intersection.area() / ( ( area() + other.area() ) / 2 );
+ }
+
+ double area() const {
+ return ( _max._x - _min._x ) * ( _max._y - _min._y );
+ }
+
+ Point center() const {
+ return Point( ( _min._x + _max._x ) / 2 ,
+ ( _min._y + _max._y ) / 2 );
+ }
+
+ bool inside( Point p , double fudge = 0 ){
+ bool res = inside( p._x , p._y , fudge );
+ //cout << "is : " << p.toString() << " in " << toString() << " = " << res << endl;
+ return res;
+ }
+
+ bool inside( double x , double y , double fudge = 0 ){
+ return
+ between( _min._x , _max._x , x , fudge ) &&
+ between( _min._y , _max._y , y , fudge );
+ }
+
+ Point _min;
+ Point _max;
+ };
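+ // Worked example (illustrative, not in the original patch): for two 1x1
+ // boxes offset by 0.5 along x, the overlap is 0.5 x 1 = 0.5, so
+ // intersects() returns 0.5 / ((1+1)/2) = 0.5 -- a ratio of intersection
+ // area to the boxes' average area, not a boolean.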
+
+ class Geo2dPlugin : public IndexPlugin {
+ public:
+ Geo2dPlugin() : IndexPlugin( GEO2DNAME ){
+ }
+
+ virtual IndexType* generate( const IndexSpec* spec ) const {
+ return new Geo2dType( this , spec );
+ }
+ } geo2dplugin;
+
+ struct GeoUnitTest : public UnitTest {
+
+ int round( double d ){
+ return (int)(.5+(d*1000));
+ }
+
+#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == b ); }
+
+ void run(){
+ assert( ! GeoHash::isBitSet( 0 , 0 ) );
+ assert( ! GeoHash::isBitSet( 0 , 31 ) );
+ assert( GeoHash::isBitSet( 1 , 31 ) );
+
+ IndexSpec i( BSON( "loc" << "2d" ) );
+ Geo2dType g( &geo2dplugin , &i );
+ {
+ double x = 73.01212;
+ double y = 41.352964;
+ BSONObj in = BSON( "x" << x << "y" << y );
+ GeoHash h = g._hash( in );
+ BSONObj out = g._unhash( h );
+ assert( round(x) == round( out["x"].number() ) );
+ assert( round(y) == round( out["y"].number() ) );
+ assert( round( in["x"].number() ) == round( out["x"].number() ) );
+ assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ }
+
+ {
+ double x = -73.01212;
+ double y = 41.352964;
+ BSONObj in = BSON( "x" << x << "y" << y );
+ GeoHash h = g._hash( in );
+ BSONObj out = g._unhash( h );
+ assert( round(x) == round( out["x"].number() ) );
+ assert( round(y) == round( out["y"].number() ) );
+ assert( round( in["x"].number() ) == round( out["x"].number() ) );
+ assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ }
+
+ {
+ GeoHash h( "0000" );
+ h.move( 0 , 1 );
+ GEOHEQ( h , "0001" );
+ h.move( 0 , -1 );
+ GEOHEQ( h , "0000" );
+
+ h.init( "0001" );
+ h.move( 0 , 1 );
+ GEOHEQ( h , "0100" );
+ h.move( 0 , -1 );
+ GEOHEQ( h , "0001" );
+
+
+ h.init( "0000" );
+ h.move( 1 , 0 );
+ GEOHEQ( h , "0010" );
+ }
+
+ {
+ Box b( 5 , 5 , 2 );
+ assert( "(5,5) -->> (7,7)" == b.toString() );
+ }
+
+ {
+ GeoHash a = g._hash( 1 , 1 );
+ GeoHash b = g._hash( 4 , 5 );
+ assert( 5 == (int)(g.distance( a , b ) ) );
+ a = g._hash( 50 , 50 );
+ b = g._hash( 42 , 44 );
+ assert( round(10) == round(g.distance( a , b )) );
+ }
+
+ {
+ GeoHash x("0000");
+ assert( 0 == x.getHash() );
+ x.init( 0 , 1 , 32 );
+ GEOHEQ( x , "0000000000000000000000000000000000000000000000000000000000000001" )
+
+ assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
+ assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
+ }
+
+ {
+ GeoHash x("1010");
+ GEOHEQ( x , "1010" );
+ GeoHash y = x + "01";
+ GEOHEQ( y , "101001" );
+ }
+
+ {
+
+ GeoHash a = g._hash( 5 , 5 );
+ GeoHash b = g._hash( 5 , 7 );
+ GeoHash c = g._hash( 100 , 100 );
+ /*
+ cout << "a: " << a << endl;
+ cout << "b: " << b << endl;
+ cout << "c: " << c << endl;
+
+ cout << "a: " << a.toStringHex1() << endl;
+ cout << "b: " << b.toStringHex1() << endl;
+ cout << "c: " << c.toStringHex1() << endl;
+ */
+ BSONObj oa = a.wrap();
+ BSONObj ob = b.wrap();
+ BSONObj oc = c.wrap();
+ /*
+ cout << "a: " << oa.hexDump() << endl;
+ cout << "b: " << ob.hexDump() << endl;
+ cout << "c: " << oc.hexDump() << endl;
+ */
+ assert( oa.woCompare( ob ) < 0 );
+ assert( oa.woCompare( oc ) < 0 );
+
+ }
+
+ {
+ GeoHash x( "000000" );
+ x.move( -1 , 0 );
+ GEOHEQ( x , "101010" );
+ x.move( 1 , -1 );
+ GEOHEQ( x , "010101" );
+ x.move( 0 , 1 );
+ GEOHEQ( x , "000000" );
+ }
+
+ {
+ GeoHash prefix( "110011000000" );
+ GeoHash entry( "1100110000011100000111000001110000011100000111000001000000000000" );
+ assert( ! entry.hasPrefix( prefix ) );
+
+ entry = "1100110000001100000111000001110000011100000111000001000000000000";
+ assert( entry.toString().find( prefix.toString() ) == 0 );
+ assert( entry.hasPrefix( GeoHash( "1100" ) ) );
+ assert( entry.hasPrefix( prefix ) );
+ }
+
+ {
+ GeoHash a = g._hash( 50 , 50 );
+ GeoHash b = g._hash( 48 , 54 );
+ assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
+ }
+
+
+ {
+ Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
+ assert( b.inside( 29.763 , -95.363 ) );
+ assert( ! b.inside( 32.9570255 , -96.1082497 ) );
+ assert( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
+ }
+
+ {
+ GeoHash a( "11001111" );
+ assert( GeoHash( "11" ) == a.commonPrefix( "11" ) );
+ assert( GeoHash( "11" ) == a.commonPrefix( "11110000" ) );
+ }
+
+ }
+ } geoUnitTest;
+
+ class GeoPoint {
+ public:
+ GeoPoint(){
+ }
+
+ GeoPoint( const KeyNode& node , double distance )
+ : _key( node.key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ) , _distance( distance ){
+ }
+
+ GeoPoint( const BSONObj& key , DiskLoc loc , double distance )
+ : _key(key) , _loc(loc) , _o( loc.obj() ) , _distance( distance ){
+ }
+
+ bool operator<( const GeoPoint& other ) const {
+ return _distance < other._distance;
+ }
+
+ bool isEmpty() const {
+ return _o.isEmpty();
+ }
+
+ BSONObj _key;
+ DiskLoc _loc;
+ BSONObj _o;
+ double _distance;
+ };
+
+ class GeoAccumulator {
+ public:
+ GeoAccumulator( const Geo2dType * g , const BSONObj& filter )
+ : _g(g) , _lookedAt(0) , _objectsLoaded(0) , _found(0) {
+ if ( ! filter.isEmpty() ){
+ _matcher.reset( new CoveredIndexMatcher( filter , g->keyPattern() ) );
+ }
+ }
+
+ virtual ~GeoAccumulator(){
+ }
+
+ virtual void add( const KeyNode& node ){
+ // when looking at other boxes, don't want to look at some object twice
+ if ( _seen.count( node.recordLoc ) ){
+ GEODEBUG( "\t\t\t\t already seen : " << node.recordLoc.obj()["_id"] );
+ return;
+ }
+ _seen.insert( node.recordLoc );
+ _lookedAt++;
+
+ // distance check
+ double d = 0;
+ if ( ! checkDistance( GeoHash( node.key.firstElement() ) , d ) ){
+ GEODEBUG( "\t\t\t\t bad distance : " << node.recordLoc.obj() << "\t" << d );
+ return;
+ }
+
+ // matcher
+ MatchDetails details;
+ if ( _matcher.get() ){
+ bool good = _matcher->matches( node.key , node.recordLoc , &details );
+ if ( details.loadedObject )
+ _objectsLoaded++;
+
+ if ( ! good ){
+ GEODEBUG( "\t\t\t\t didn't match : " << node.recordLoc.obj()["_id"] );
+ return;
+ }
+ }
+
+ if ( ! details.loadedObject ) // don't double count
+ _objectsLoaded++;
+
+ addSpecific( node , d );
+ _found++;
+ }
+
+ virtual void addSpecific( const KeyNode& node , double d ) = 0;
+ virtual bool checkDistance( const GeoHash& node , double& d ) = 0;
+
+ long long found() const {
+ return _found;
+ }
+
+ const Geo2dType * _g;
+ set<DiskLoc> _seen;
+ auto_ptr<CoveredIndexMatcher> _matcher;
+
+ long long _lookedAt;
+ long long _objectsLoaded;
+ long long _found;
+ };
+
+ class GeoHopper : public GeoAccumulator {
+ public:
+ typedef multiset<GeoPoint> Holder;
+
+ GeoHopper( const Geo2dType * g , unsigned max , const GeoHash& n , const BSONObj& filter = BSONObj() )
+ : GeoAccumulator( g , filter ) , _max( max ) , _near( n ) {
+
+ }
+
+ virtual bool checkDistance( const GeoHash& h , double& d ){
+ d = _g->distance( _near , h );
+ bool good = _points.size() < _max || d < farthest();
+ GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near << "\t" << h << "\t" << d
+ << " ok: " << good << " farthest: " << farthest() );
+ return good;
+ }
+
+ virtual void addSpecific( const KeyNode& node , double d ){
+ GEODEBUG( "\t\t" << GeoHash( node.key.firstElement() ) << "\t" << node.recordLoc.obj() << "\t" << d );
+ _points.insert( GeoPoint( node.key , node.recordLoc , d ) );
+ if ( _points.size() > _max ){
+ _points.erase( --_points.end() );
+ }
+ }
+
+ double farthest(){
+ if ( _points.size() == 0 )
+ return -1;
+
+ Holder::iterator i = _points.end();
+ i--;
+ return i->_distance;
+ }
+
+ unsigned _max;
+ GeoHash _near;
+ Holder _points;
+
+ };
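+ // Design note (illustrative): GeoHopper keeps at most _max points in a
+ // multiset ordered by distance; addSpecific() inserts, then evicts the
+ // farthest point, so farthest() is always the current k-th nearest
+ // distance and lets checkDistance() prune anything farther out.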
+
+ struct BtreeLocation {
+ int pos;
+ bool found;
+ DiskLoc bucket;
+
+ BSONObj key(){
+ if ( bucket.isNull() )
+ return BSONObj();
+ return bucket.btree()->keyNode( pos ).key;
+ }
+
+ bool hasPrefix( const GeoHash& hash ){
+ BSONElement e = key().firstElement();
+ if ( e.eoo() )
+ return false;
+ return GeoHash( e ).hasPrefix( hash );
+ }
+
+ bool advance( int direction , int& totalFound , GeoAccumulator* all ){
+
+ if ( bucket.isNull() )
+ return false;
+ bucket = bucket.btree()->advance( bucket , pos , direction , "btreelocation" );
+
+ return checkCur( totalFound , all );
+ }
+
+ bool checkCur( int& totalFound , GeoAccumulator* all ){
+ if ( bucket.isNull() )
+ return false;
+
+ if ( bucket.btree()->isUsed(pos) ){
+ totalFound++;
+ all->add( bucket.btree()->keyNode( pos ) );
+ }
+ else {
+ GEODEBUG( "\t\t\t\t not used: " << key() );
+ }
+
+ return true;
+ }
+
+ string toString(){
+ stringstream ss;
+ ss << "bucket: " << bucket.toString() << " pos: " << pos << " found: " << found;
+ return ss.str();
+ }
+
+ static bool initial( const IndexDetails& id , const Geo2dType * spec ,
+ BtreeLocation& min , BtreeLocation& max ,
+ GeoHash start ,
+ int & found , GeoAccumulator * hopper ){
+
+ min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
+ spec->_order , min.pos , min.found , minDiskLoc );
+ min.checkCur( found , hopper );
+ max = min;
+
+ if ( min.bucket.isNull() ){
+ min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
+ spec->_order , min.pos , min.found , minDiskLoc , -1 );
+ min.checkCur( found , hopper );
+ }
+
+ return ! min.bucket.isNull() || ! max.bucket.isNull();
+ }
+ };
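+ // Illustrative note (not part of the original patch): initial() seats a
+ // location at the query's geohash key; callers copy it into min/max and
+ // then advance(-1)/advance(1) to sweep outward in both key directions
+ // for as long as the keys share the current prefix.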
+
+ class GeoSearch {
+ public:
+ GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() )
+ : _spec( g ) , _n( n ) , _start( n ) ,
+ _numWanted( numWanted ) , _filter( filter ) ,
+ _hopper( new GeoHopper( g , numWanted , n , filter ) )
+ {
+ assert( g->getDetails() );
+ _nscanned = 0;
+ _found = 0;
+ }
+
+ void exec(){
+ const IndexDetails& id = *_spec->getDetails();
+
+ BtreeBucket * head = id.head.btree();
+ assert( head );
+ /*
+ * Search algorithm
+ * 1) use geohash prefix to find X items
+ * 2) compute max distance from want to an item
+ * 3) find optimal set of boxes that complete circle
+ * 4) use regular btree cursors to scan those boxes
+ */
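+ // Illustrative walk-through (not part of the original patch): step 1
+ // below widens _prefix one level at a time until the hopper holds
+ // _numWanted candidates; step 2 sizes a box around the query point from
+ // the farthest hit; steps 3-4 scan that cell and its 8 neighbors (the
+ // x/y loop) via doBox(), subdividing boxes that overlap the target poorly.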
+
+ GeoHopper * hopper = _hopper.get();
+
+ _prefix = _start;
+ { // 1 regular geo hash algorithm
+
+
+ BtreeLocation min,max;
+ if ( ! BtreeLocation::initial( id , _spec , min , max , _n , _found , hopper ) )
+ return;
+
+ while ( _hopper->found() < _numWanted ){
+ GEODEBUG( _prefix << "\t" << _found << "\t DESC" );
+ while ( min.hasPrefix( _prefix ) && min.advance( -1 , _found , hopper ) )
+ _nscanned++;
+ GEODEBUG( _prefix << "\t" << _found << "\t ASC" );
+ while ( max.hasPrefix( _prefix ) && max.advance( 1 , _found , hopper ) )
+ _nscanned++;
+ if ( ! _prefix.constrains() )
+ break;
+ _prefix = _prefix.up();
+ }
+ }
+ GEODEBUG( "done part 1" );
+ if ( _found && _prefix.constrains() ){
+ // 2
+ Point center( _spec , _n );
+ double boxSize = _spec->size( _prefix );
+ double farthest = hopper->farthest();
+ if ( farthest > boxSize )
+ boxSize = farthest;
+ Box want( center._x - ( boxSize / 2 ) , center._y - ( boxSize / 2 ) , boxSize );
+ while ( _spec->size( _prefix ) < boxSize )
+ _prefix = _prefix.up();
+ log(1) << "want: " << want << " found:" << _found << " hash size:" << _spec->size( _prefix ) << endl;
+
+ for ( int x=-1; x<=1; x++ ){
+ for ( int y=-1; y<=1; y++ ){
+ GeoHash toscan = _prefix;
+ toscan.move( x , y );
+
+ // 3 & 4
+ doBox( id , want , toscan );
+ }
+ }
+ }
+ GEODEBUG( "done search" )
+
+ }
+
+ void doBox( const IndexDetails& id , const Box& want , const GeoHash& toscan , int depth = 0 ){
+ Box testBox( _spec , toscan );
+ if ( logLevel > 0 ) log(1) << "\t doBox: " << testBox << "\t" << toscan.toString() << endl;
+
+ double intPer = testBox.intersects( want );
+
+ if ( intPer <= 0 )
+ return;
+
+ if ( intPer < .5 && depth < 3 ){
+ doBox( id , want , toscan + "00" , depth + 1);
+ doBox( id , want , toscan + "01" , depth + 1);
+ doBox( id , want , toscan + "10" , depth + 1);
+ doBox( id , want , toscan + "11" , depth + 1);
+ return;
+ }
+
+ BtreeLocation loc;
+ loc.bucket = id.head.btree()->locate( id , id.head , toscan.wrap() , _spec->_order ,
+ loc.pos , loc.found , minDiskLoc );
+ loc.checkCur( _found , _hopper.get() );
+ while ( loc.hasPrefix( toscan ) && loc.advance( 1 , _found , _hopper.get() ) )
+ _nscanned++;
+
+ }
+
+
+ const Geo2dType * _spec;
+
+ GeoHash _n;
+ GeoHash _start;
+ GeoHash _prefix;
+ int _numWanted;
+ BSONObj _filter;
+ shared_ptr<GeoHopper> _hopper;
+
+ long long _nscanned;
+ int _found;
+ };
+
+ class GeoCursorBase : public Cursor {
+ public:
+ GeoCursorBase( const Geo2dType * spec )
+ : _spec( spec ), _id( _spec->getDetails() ){
+
+ }
+
+ virtual DiskLoc refLoc(){ return DiskLoc(); }
+
+ virtual BSONObj indexKeyPattern() {
+ return _spec->keyPattern();
+ }
+
+ virtual void noteLocation() {
+ assert(0);
+ }
+
+ /* called before query getmore block is iterated */
+ virtual void checkLocation() {
+ assert(0);
+ }
+
+ virtual bool supportGetMore() { return false; }
+
+ virtual bool getsetdup(DiskLoc loc){
+ return false;
+ }
+
+ const Geo2dType * _spec;
+ const IndexDetails * _id;
+ };
+
+ class GeoSearchCursor : public GeoCursorBase {
+ public:
+ GeoSearchCursor( shared_ptr<GeoSearch> s )
+ : GeoCursorBase( s->_spec ) ,
+ _s( s ) , _cur( s->_hopper->_points.begin() ) , _end( s->_hopper->_points.end() ) {
+ }
+
+ virtual ~GeoSearchCursor() {}
+
+ virtual bool ok(){
+ return _cur != _end;
+ }
+
+ virtual Record* _current(){ assert(ok()); return _cur->_loc.rec(); }
+ virtual BSONObj current(){ assert(ok()); return _cur->_o; }
+ virtual DiskLoc currLoc(){ assert(ok()); return _cur->_loc; }
+ virtual bool advance(){ _cur++; return ok(); }
+ virtual BSONObj currKey() const { return _cur->_key; }
+
+ virtual string toString() {
+ return "GeoSearchCursor";
+ }
+
+
+ virtual BSONObj prettyStartKey() const {
+ return BSON( _s->_spec->_geo << _s->_prefix.toString() );
+ }
+ virtual BSONObj prettyEndKey() const {
+ GeoHash temp = _s->_prefix;
+ temp.move( 1 , 1 );
+ return BSON( _s->_spec->_geo << temp.toString() );
+ }
+
+
+ shared_ptr<GeoSearch> _s;
+ GeoHopper::Holder::iterator _cur;
+ GeoHopper::Holder::iterator _end;
+ };
+
+ class GeoBrowse : public GeoCursorBase , public GeoAccumulator {
+ public:
+ GeoBrowse( const Geo2dType * g , string type , BSONObj filter = BSONObj() )
+ : GeoCursorBase( g ) ,GeoAccumulator( g , filter ) ,
+ _type( type ) , _filter( filter ) , _firstCall(true) {
+ }
+
+ virtual string toString() {
+ return (string)"GeoBrowse-" + _type;
+ }
+
+ virtual bool ok(){
+ if ( _firstCall ){
+ fillStack();
+ _firstCall = false;
+ }
+ if ( ! _cur.isEmpty() || _stack.size() )
+ return true;
+
+ while ( moreToDo() ){
+ fillStack();
+ if ( ! _cur.isEmpty() )
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual bool advance(){
+ _cur._o = BSONObj();
+
+ if ( _stack.size() ){
+ _cur = _stack.front();
+ _stack.pop_front();
+ return true;
+ }
+
+ if ( ! moreToDo() )
+ return false;
+
+ while ( _cur.isEmpty() && moreToDo() )
+ fillStack();
+ return ! _cur.isEmpty();
+ }
+
+ virtual Record* _current(){ assert(ok()); return _cur._loc.rec(); }
+ virtual BSONObj current(){ assert(ok()); return _cur._o; }
+ virtual DiskLoc currLoc(){ assert(ok()); return _cur._loc; }
+ virtual BSONObj currKey() const { return _cur._key; }
+
+
+ virtual bool moreToDo() = 0;
+ virtual void fillStack() = 0;
+
+ virtual void addSpecific( const KeyNode& node , double d ){
+ if ( _cur.isEmpty() )
+ _cur = GeoPoint( node , d );
+ else
+ _stack.push_back( GeoPoint( node , d ) );
+ }
+
+ string _type;
+ BSONObj _filter;
+ list<GeoPoint> _stack;
+
+ GeoPoint _cur;
+ bool _firstCall;
+
+ };
+
+ class GeoCircleBrowse : public GeoBrowse {
+ public:
+
+ enum State {
+ START ,
+ DOING_EXPAND ,
+ DOING_AROUND ,
+ DONE
+ } _state;
+
+ GeoCircleBrowse( const Geo2dType * g , const BSONObj& circle , BSONObj filter = BSONObj() )
+ : GeoBrowse( g , "circle" , filter ){
+
+ uassert( 13060 , "$center needs 2 fields (middle,max distance)" , circle.nFields() == 2 );
+ BSONObjIterator i(circle);
+ _start = g->_tohash( i.next() );
+ _prefix = _start;
+ _maxDistance = i.next().numberDouble();
+ uassert( 13061 , "need a max distance > 0 " , _maxDistance > 0 );
+
+ _state = START;
+ _found = 0;
+
+ ok();
+ }
+
+ virtual bool moreToDo(){
+ return _state != DONE;
+ }
+
+ virtual void fillStack(){
+ if ( _state == START ){
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
+ _prefix , _found , this ) ){
+ _state = DONE;
+ return;
+ }
+ _state = DOING_EXPAND;
+ }
+
+ if ( _state == DOING_EXPAND ){
+ GEODEBUG( "circle prefix [" << _prefix << "]" );
+ while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _found , this ) );
+ while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _found , this ) );
+
+ if ( ! _prefix.constrains() ){
+ GEODEBUG( "\t exhausted the btree" );
+ _state = DONE;
+ return;
+ }
+
+ if ( _g->distance( _prefix , _start ) > _maxDistance ){
+ GEODEBUG( "\tpast circle bounds" );
+ GeoHash tr = _prefix;
+ tr.move( 1 , 1 );
+ if ( _g->distance( tr , _start ) > _maxDistance )
+ _state = DOING_AROUND;
+ else
+ _prefix = _prefix.up();
+ }
+ else
+ _prefix = _prefix.up();
+ return;
+ }
+
+ if ( _state == DOING_AROUND ){
+ _state = DONE;
+ return;
+ }
+ }
+
+ virtual bool checkDistance( const GeoHash& h , double& d ){
+ d = _g->distance( _start , h );
+ GEODEBUG( "\t " << h << "\t" << d );
+ return d <= ( _maxDistance + .01 );
+ }
+
+ GeoHash _start;
+ double _maxDistance;
+
+ int _found;
+
+ GeoHash _prefix;
+ BtreeLocation _min;
+ BtreeLocation _max;
+
+ };
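+ // Design note (illustrative): DOING_EXPAND widens _prefix one level at a
+ // time; once the prefix cell's origin and its (1,1)-moved corner are both
+ // farther than _maxDistance from the center, the circle is covered and
+ // the browse finishes.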
+
+ class GeoBoxBrowse : public GeoBrowse {
+ public:
+
+ enum State {
+ START ,
+ DOING_EXPAND ,
+ DONE
+ } _state;
+
+ GeoBoxBrowse( const Geo2dType * g , const BSONObj& box , BSONObj filter = BSONObj() )
+ : GeoBrowse( g , "box" , filter ){
+
+ uassert( 13063 , "$box needs 2 fields (bottomLeft,topRight)" , box.nFields() == 2 );
+ BSONObjIterator i(box);
+ _bl = g->_tohash( i.next() );
+ _tr = g->_tohash( i.next() );
+
+ _want._min = Point( _g , _bl );
+ _want._max = Point( _g , _tr );
+
+ uassert( 13064 , "need an area > 0 " , _want.area() > 0 );
+
+ _state = START;
+ _found = 0;
+
+ Point center = _want.center();
+ _prefix = _g->_hash( center._x , center._y );
+
+ GEODEBUG( "center : " << center.toString() << "\t" << _prefix );
+
+ {
+ GeoHash a(0LL,32);
+ GeoHash b(0LL,32);
+ b.move(1,1);
+ _fudge = _g->distance(a,b);
+ }
+
+ ok();
+ }
+
+ virtual bool moreToDo(){
+ return _state != DONE;
+ }
+
+ virtual void fillStack(){
+ if ( _state == START ){
+
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
+ _prefix , _found , this ) ){
+ _state = DONE;
+ return;
+ }
+ _state = DOING_EXPAND;
+ }
+
+ if ( _state == DOING_EXPAND ){
+ int started = _found;
+ while ( started == _found || _state == DONE ){
+ GEODEBUG( "box prefix [" << _prefix << "]" );
+ while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _found , this ) );
+ while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _found , this ) );
+
+ if ( _state == DONE )
+ return;
+
+ if ( ! _prefix.constrains() ){
+ GEODEBUG( "box exhausted" );
+ _state = DONE;
+ return;
+ }
+
+ Box cur( _g , _prefix );
+ if ( cur._min._x + _fudge < _want._min._x &&
+ cur._min._y + _fudge < _want._min._y &&
+ cur._max._x - _fudge > _want._max._x &&
+ cur._max._y - _fudge > _want._max._y ){
+
+ _state = DONE;
+ GeoHash temp = _prefix.commonPrefix( cur._max.hash( _g ) );
+
+ GEODEBUG( "box done : " << cur.toString() << " prefix:" << _prefix << " common:" << temp );
+
+ if ( temp == _prefix )
+ return;
+ _prefix = temp;
+ GEODEBUG( "\t one more loop" );
+ continue;
+ }
+ else {
+ _prefix = _prefix.up();
+ }
+ }
+ return;
+ }
+
+ }
+
+ virtual bool checkDistance( const GeoHash& h , double& d ){
+ bool res = _want.inside( Point( _g , h ) , _fudge );
+ GEODEBUG( "\t want : " << _want.toString()
+ << " point: " << Point( _g , h ).toString()
+ << " in : " << res );
+ return res;
+ }
+
+ GeoHash _bl;
+ GeoHash _tr;
+ Box _want;
+
+ int _found;
+
+ GeoHash _prefix;
+ BtreeLocation _min;
+ BtreeLocation _max;
+
+ double _fudge;
+ };
+
+
+ auto_ptr<Cursor> Geo2dType::newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
+ if ( numWanted < 0 )
+ numWanted = numWanted * -1;
+ else if ( numWanted == 0 )
+ numWanted = 100;
+
+ BSONObjIterator i(query);
+ while ( i.more() ){
+ BSONElement e = i.next();
+
+ if ( _geo != e.fieldName() )
+ continue;
+
+ if ( e.type() != Object )
+ continue;
+
+ switch ( e.embeddedObject().firstElement().getGtLtOp() ){
+ case BSONObj::opNEAR: {
+ e = e.embeddedObject().firstElement();
+ shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query ) );
+ s->exec();
+ auto_ptr<Cursor> c;
+ c.reset( new GeoSearchCursor( s ) );
+ return c;
+ }
+ case BSONObj::opWITHIN: {
+ e = e.embeddedObject().firstElement();
+ uassert( 13057 , "$within has to take an object or array" , e.isABSONObj() );
+ e = e.embeddedObject().firstElement();
+ string type = e.fieldName();
+ if ( type == "$center" ){
+ uassert( 13059 , "$center has to take an object or array" , e.isABSONObj() );
+ auto_ptr<Cursor> c;
+ c.reset( new GeoCircleBrowse( this , e.embeddedObjectUserCheck() , query ) );
+ return c;
+ }
+ else if ( type == "$box" ){
+ uassert( 13065 , "$box has to take an object or array" , e.isABSONObj() );
+ auto_ptr<Cursor> c;
+ c.reset( new GeoBoxBrowse( this , e.embeddedObjectUserCheck() , query ) );
+ return c;
+ }
+ throw UserException( 13058 , (string)"unknown $within type: " + type );
+ }
+ default:
+ break;
+ }
+ }
+
+ throw UserException( 13042 , (string)"missing geo field (" + _geo + ") in : " + query.toString() );
+ }
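+ // Example queries this cursor factory dispatches (illustrative; the
+ // collection "places" and field "loc" are assumptions for a
+ // { loc : "2d" } index):
+ // db.places.find( { loc : { $near : [ 50 , 50 ] } } )
+ // db.places.find( { loc : { $within : { $center : [ [ 50 , 50 ] , 10 ] } } } )
+ // db.places.find( { loc : { $within : { $box : [ [ 40 , 40 ] , [ 60 , 60 ] ] } } } )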
+
+ // ------
+ // commands
+ // ------
+
+ class Geo2dFindNearCmd : public Command {
+ public:
+ Geo2dFindNearCmd() : Command( "geoNear" ){}
+ virtual LockType locktype(){ return READ; }
+ bool slaveOk() { return true; }
+ bool slaveOverrideOk() { return true; }
+ bool run(const char * stupidns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ string ns = nsToDatabase( stupidns ) + "." + cmdObj.firstElement().valuestr();
+
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+ if ( ! d ){
+ errmsg = "can't find ns";
+ return false;
+ }
+
+ int geoIdx = -1;
+ {
+ NamespaceDetails::IndexIterator ii = d->ii();
+ while ( ii.more() ){
+ IndexDetails& id = ii.next();
+ if ( id.getSpec().getTypeName() == GEO2DNAME ){
+ if ( geoIdx >= 0 ){
+ errmsg = "2 geo indexes :(";
+ return false;
+ }
+ geoIdx = ii.pos() - 1;
+ }
+ }
+ }
+
+ if ( geoIdx < 0 ){
+ errmsg = "no geo index :(";
+ return false;
+ }
+
+ result.append( "ns" , ns );
+
+ IndexDetails& id = d->idx( geoIdx );
+ Geo2dType * g = (Geo2dType*)id.getSpec().getType();
+ assert( &id == g->getDetails() );
+
+ int numWanted = 100;
+ if ( cmdObj["num"].isNumber() )
+ numWanted = cmdObj["num"].numberInt();
+
+ uassert(13046, "'near' param missing/invalid", !cmdObj["near"].eoo());
+ const GeoHash n = g->_tohash( cmdObj["near"] );
+ result.append( "near" , n.toString() );
+
+ BSONObj filter;
+ if ( cmdObj["query"].type() == Object )
+ filter = cmdObj["query"].embeddedObject();
+
+ GeoSearch gs( g , n , numWanted , filter );
+
+ if ( cmdObj["start"].type() == String){
+ GeoHash start = (string) cmdObj["start"].valuestr();
+ gs._start = start;
+ }
+
+ gs.exec();
+
+ double distanceMultiplier = 1;
+ if ( cmdObj["distanceMultiplier"].isNumber() )
+ distanceMultiplier = cmdObj["distanceMultiplier"].number();
+
+ double totalDistance = 0;
+
+
+ BSONObjBuilder arr( result.subarrayStart( "results" ) );
+ int x = 0;
+ for ( GeoHopper::Holder::iterator i=gs._hopper->_points.begin(); i!=gs._hopper->_points.end(); i++ ){
+ const GeoPoint& p = *i;
+
+ double dis = distanceMultiplier * p._distance;
+ totalDistance += dis;
+
+ BSONObjBuilder bb( arr.subobjStart( BSONObjBuilder::numStr( x++ ).c_str() ) );
+ bb.append( "dis" , dis );
+ bb.append( "obj" , p._o );
+ bb.done();
+ }
+ arr.done();
+
+ BSONObjBuilder stats( result.subobjStart( "stats" ) );
+ stats.append( "time" , cc().curop()->elapsedMillis() );
+ stats.appendNumber( "btreelocs" , gs._nscanned );
+ stats.appendNumber( "nscanned" , gs._hopper->_lookedAt );
+ stats.appendNumber( "objectsLoaded" , gs._hopper->_objectsLoaded );
+ stats.append( "avgDistance" , totalDistance / x );
+ stats.done();
+
+ return true;
+ }
+
+ } geo2dFindNearCmd;
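+ // Illustrative invocation (the collection name "places" is an assumption):
+ // db.runCommand( { geoNear : "places" , near : [ 50 , 50 ] , num : 10 } )
+ // replies with { ns, near (geohash string), results : [ { dis , obj } , ... ],
+ // stats : { time , btreelocs , nscanned , objectsLoaded , avgDistance } }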
+
+ class GeoWalkCmd : public Command {
+ public:
+ GeoWalkCmd() : Command( "geoWalk" ){}
+ virtual LockType locktype(){ return READ; }
+ bool slaveOk() { return true; }
+ bool slaveOverrideOk() { return true; }
+ bool run(const char * stupidns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ string ns = nsToDatabase( stupidns ) + "." + cmdObj.firstElement().valuestr();
+
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+ if ( ! d ){
+ errmsg = "can't find ns";
+ return false;
+ }
+
+ int geoIdx = -1;
+ {
+ NamespaceDetails::IndexIterator ii = d->ii();
+ while ( ii.more() ){
+ IndexDetails& id = ii.next();
+ if ( id.getSpec().getTypeName() == GEO2DNAME ){
+ if ( geoIdx >= 0 ){
+ errmsg = "2 geo indexes :(";
+ return false;
+ }
+ geoIdx = ii.pos() - 1;
+ }
+ }
+ }
+
+ if ( geoIdx < 0 ){
+ errmsg = "no geo index :(";
+ return false;
+ }
+
+
+ IndexDetails& id = d->idx( geoIdx );
+ Geo2dType * g = (Geo2dType*)id.getSpec().getType();
+ assert( &id == g->getDetails() );
+
+ int max = 100000;
+
+ BtreeCursor c( d , geoIdx , id , BSONObj() , BSONObj() , true , 1 );
+ while ( c.ok() && max-- ){
+ GeoHash h( c.currKey().firstElement() );
+ int len;
+ cout << "\t" << h.toString()
+ << "\t" << c.current()[g->_geo]
+ << "\t" << hex << h.getHash()
+ << "\t" << hex << ((long long*)c.currKey().firstElement().binData(len))[0]
+ << "\t" << c.current()["_id"]
+ << endl;
+ c.advance();
+ }
+
+ return true;
+ }
+
+ } geoWalkCmd;
+
+}
diff --git a/db/instance.cpp b/db/instance.cpp
index e8515c4..909911e 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -35,7 +35,8 @@
#if !defined(_WIN32)
#include <sys/file.h>
#endif
-#include "dbstats.h"
+#include "stats/counters.h"
+#include "background.h"
namespace mongo {
@@ -45,19 +46,9 @@ namespace mongo {
void receivedInsert(Message& m, CurOp& op);
bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop );
- CmdLine cmdLine;
-
int nloggedsome = 0;
#define LOGSOME if( ++nloggedsome < 1000 || nloggedsome % 100 == 0 )
- SlaveTypes slave = NotSlave;
- bool master = false; // true means keep an op log
- bool autoresync = false;
-
- /* we use new here so we don't have to worry about destructor orders at program shutdown */
- MongoMutex &dbMutex( *(new MongoMutex) );
-// MutexInfo dbMutexInfo;
-
string dbExecCommand;
string bind_ip = "";
@@ -66,8 +57,6 @@ namespace mongo {
DiagLog _diaglog;
- int opIdMem = 100000000;
-
bool useCursors = true;
bool useHints = true;
@@ -87,25 +76,30 @@ namespace mongo {
// see FSyncCommand:
unsigned lockedForWriting;
- boost::mutex lockedForWritingMutex;
+ mongo::mutex lockedForWritingMutex;
bool unlockRequested = false;
void inProgCmd( Message &m, DbResponse &dbresponse ) {
BSONObjBuilder b;
- AuthenticationInfo *ai = cc().ai;
- if( !ai->isAuthorized("admin") ) {
+ if( ! cc().isAdmin() ){
BSONObjBuilder b;
b.append("err", "unauthorized");
}
else {
+ DbMessage d(m);
+ QueryMessage q(d);
+ bool all = q.query["$all"].trueValue();
vector<BSONObj> vals;
{
- boostlock bl(Client::clientsMutex);
+ Client& me = cc();
+ scoped_lock bl(Client::clientsMutex);
for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
Client *c = *i;
+ if ( c == &me )
+ continue;
CurOp& co = *(c->curop());
- if( co.active() )
+ if( all || co.active() )
vals.push_back( co.infoNoauth() );
}
}
@@ -116,14 +110,13 @@ namespace mongo {
b.append("info", "use command {unlock:0} to terminate the fsync write/snapshot lock");
}
}
-
+
replyToQuery(0, m, dbresponse, b.obj());
}
void killOp( Message &m, DbResponse &dbresponse ) {
BSONObj obj;
- AuthenticationInfo *ai = currentClient.get()->ai;
- if( !ai->isAuthorized("admin") ) {
+ if( ! cc().isAdmin() ){
obj = fromjson("{\"err\":\"unauthorized\"}");
}
/*else if( !dbMutexInfo.isLocked() )
@@ -146,8 +139,7 @@ namespace mongo {
void unlockFsync(const char *ns, Message& m, DbResponse &dbresponse) {
BSONObj obj;
- AuthenticationInfo *ai = currentClient.get()->ai;
- if( !ai->isAuthorized("admin") || strncmp(ns, "admin.", 6) != 0 ) {
+ if( ! cc().isAdmin() || strncmp(ns, "admin.", 6) != 0 ) {
obj = fromjson("{\"err\":\"unauthorized\"}");
}
else {
@@ -163,10 +155,7 @@ namespace mongo {
replyToQuery(0, m, dbresponse, obj);
}
- static bool receivedQuery(DbResponse& dbresponse, Message& m,
- CurOp& op, bool logit,
- mongolock& lock
- ) {
+ static bool receivedQuery(Client& c, DbResponse& dbresponse, Message& m ){
bool ok = true;
MSGID responseTo = m.data->id;
@@ -174,26 +163,9 @@ namespace mongo {
QueryMessage q(d);
QueryResult* msgdata;
- Client& c = cc();
-
+ CurOp& op = *(c.curop());
+
try {
- if (q.fields.get() && q.fields->errmsg)
- uassert( 10053 , q.fields->errmsg, false);
-
- /* note these are logged BEFORE authentication -- which is sort of ok */
- if ( _diaglog.level && logit ) {
- if ( strstr(q.ns, ".$cmd") ) {
- /* $cmd queries are "commands" and usually best treated as write operations */
- OPWRITE;
- }
- else {
- OPREAD;
- }
- }
-
- setClient( q.ns, dbpath, &lock );
- c.top.setRead();
- c.curop()->setNS(q.ns);
msgdata = runQuery(m, q, op ).release();
}
catch ( AssertionException& e ) {
@@ -230,32 +202,25 @@ namespace mongo {
resp->setData(msgdata, true); // transport will free
dbresponse.response = resp;
dbresponse.responseTo = responseTo;
- Database *database = c.database();
- if ( database ) {
- if ( database->profile )
- op.debug().str << " bytes:" << resp->data->dataLen();
- }
- else {
- if ( strstr(q.ns, "$cmd") == 0 ) // (this condition is normal for $cmd dropDatabase)
- log() << "ERROR: receiveQuery: database is null; ns=" << q.ns << endl;
+
+ if ( op.shouldDBProfile( 0 ) ){
+ op.debug().str << " bytes:" << resp->data->dataLen();
}
return ok;
}
- bool commandIsReadOnly(BSONObj& _cmdobj);
-
// Returns false when request includes 'end'
bool assembleResponse( Message &m, DbResponse &dbresponse, const sockaddr_in &client ) {
- bool writeLock = true;
-
// before we lock...
int op = m.data->operation();
- globalOpCounters.gotOp( op );
+ bool isCommand = false;
const char *ns = m.data->_data + 4;
if ( op == dbQuery ) {
if( strstr(ns, ".$cmd") ) {
+ isCommand = true;
+ OPWRITE;
if( strstr(ns, ".$cmd.sys.") ) {
if( strstr(ns, "$cmd.sys.inprog") ) {
inProgCmd(m, dbresponse);
@@ -270,17 +235,21 @@ namespace mongo {
return true;
}
}
- DbMessage d( m );
- QueryMessage q( d );
- writeLock = !commandIsReadOnly(q.query);
+
+ }
+ else {
+ OPREAD;
}
- else
- writeLock = false;
}
else if( op == dbGetMore ) {
- writeLock = false;
+ OPREAD;
+ }
+ else {
+ OPWRITE;
}
+ globalOpCounters.gotOp( op , isCommand );
+
if ( handlePossibleShardedMessage( m , dbresponse ) ){
/* important to do this before we lock
so if a message has to be forwarded, doesn't block for that
@@ -289,161 +258,115 @@ namespace mongo {
}
Client& c = cc();
- c.clearns();
auto_ptr<CurOp> nestedOp;
CurOp* currentOpP = c.curop();
if ( currentOpP->active() ){
- nestedOp.reset( new CurOp() );
+ nestedOp.reset( new CurOp( &c , currentOpP ) );
currentOpP = nestedOp.get();
}
CurOp& currentOp = *currentOpP;
- currentOp.reset(client);
- currentOp.setOp(op);
+ currentOp.reset(client,op);
OpDebug& debug = currentOp.debug();
StringBuilder& ss = debug.str;
+ ss << opToString( op ) << " ";
int logThreshold = cmdLine.slowMS;
bool log = logLevel >= 1;
-
- Timer t( currentOp.startTime() );
-
- mongolock lk(writeLock);
-
-#if 0
- /* use this if you only want to process operations for a particular namespace.
- maybe add to cmd line parms or something fancier.
- */
- DbMessage ddd(m);
- if ( strncmp(ddd.getns(), "clusterstock", 12) != 0 ) {
- static int q;
- if ( ++q < 20 )
- out() << "TEMP skip " << ddd.getns() << endl;
- goto skip;
- }
-#endif
-
+
if ( op == dbQuery ) {
- // receivedQuery() does its own authorization processing.
- if ( ! receivedQuery(dbresponse, m, currentOp, true, lk) )
+ if ( ! receivedQuery(c , dbresponse, m ) )
log = true;
}
else if ( op == dbGetMore ) {
- // does its own authorization processing.
- OPREAD;
DEV log = true;
- ss << "getmore ";
if ( ! receivedGetMore(dbresponse, m, currentOp) )
log = true;
}
else if ( op == dbMsg ) {
- /* deprecated / rarely used. intended for connection diagnostics. */
- ss << "msg ";
+ // deprecated - replaced by commands
char *p = m.data->_data;
int len = strlen(p);
if ( len > 400 )
out() << curTimeMillis() % 10000 <<
- " long msg received, len:" << len <<
- " ends with: " << p + len - 10 << endl;
- bool end = false; //strcmp("end", p) == 0;
+ " long msg received, len:" << len <<
+ " ends with: " << p + len - 10 << endl;
+
Message *resp = new Message();
- resp->setData(opReply, "i am fine");
+ if ( strcmp( "end" , p ) == 0 )
+ resp->setData( opReply , "dbMsg end no longer supported" );
+ else
+ resp->setData( opReply , "i am fine - dbMsg deprecated");
+
dbresponse.response = resp;
dbresponse.responseTo = m.data->id;
- //dbMsgPort.reply(m, resp);
- if ( end )
- return false;
}
else {
const char *ns = m.data->_data + 4;
char cl[256];
nsToDatabase(ns, cl);
- currentOp.setNS(ns);
- AuthenticationInfo *ai = currentClient.get()->ai;
- if( !ai->isAuthorized(cl) ) {
+ if( ! c.getAuthenticationInfo()->isAuthorized(cl) ) {
uassert_nothrow("unauthorized");
}
- else if ( op == dbInsert ) {
- OPWRITE;
- try {
- ss << "insert ";
- receivedInsert(m, currentOp);
- }
- catch ( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion insert, continuing\n";
- ss << " exception " << e.toString();
- log = true;
- }
- }
- else if ( op == dbUpdate ) {
- OPWRITE;
- try {
- ss << "update ";
- receivedUpdate(m, currentOp);
- }
- catch ( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion update, continuing" << endl;
- ss << " exception " << e.toString();
- log = true;
- }
- }
- else if ( op == dbDelete ) {
- OPWRITE;
- try {
- ss << "remove ";
- receivedDelete(m, currentOp);
- }
- catch ( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion receivedDelete, continuing" << endl;
- ss << " exception " << e.toString();
- log = true;
- }
- }
- else if ( op == dbKillCursors ) {
- OPREAD;
+ else {
try {
- logThreshold = 10;
- ss << "killcursors ";
- receivedKillCursors(m);
+ if ( op == dbInsert ) {
+ receivedInsert(m, currentOp);
+ }
+ else if ( op == dbUpdate ) {
+ receivedUpdate(m, currentOp);
+ }
+ else if ( op == dbDelete ) {
+ receivedDelete(m, currentOp);
+ }
+ else if ( op == dbKillCursors ) {
+ currentOp.ensureStarted();
+ logThreshold = 10;
+ ss << "killcursors ";
+ receivedKillCursors(m);
+ }
+ else {
+ out() << " operation isn't supported: " << op << endl;
+ currentOp.done();
+ log = true;
+ }
}
catch ( AssertionException& e ) {
- problem() << " Caught Assertion in kill cursors, continuing" << endl;
+ problem() << " Caught Assertion in " << opToString(op) << " , continuing" << endl;
ss << " exception " + e.toString();
log = true;
}
}
- else {
- out() << " operation isn't supported: " << op << endl;
- currentOp.setActive(false);
- assert(false);
- }
}
- int ms = t.millis();
+ currentOp.ensureStarted();
+ currentOp.done();
+ int ms = currentOp.totalTimeMillis();
+
log = log || (logLevel >= 2 && ++ctr % 512 == 0);
DEV log = true;
if ( log || ms > logThreshold ) {
ss << ' ' << ms << "ms";
mongo::log() << ss.str() << endl;
}
- Database *database = c.database();
- if ( database && database->profile >= 1 ) {
- if ( database->profile >= 2 || ms >= cmdLine.slowMS ) {
- // performance profiling is on
- if ( dbMutex.getState() > 1 || dbMutex.getState() < -1 ){
- out() << "warning: not profiling because recursive lock" << endl;
+
+ if ( currentOp.shouldDBProfile( ms ) ){
+ // performance profiling is on
+ if ( dbMutex.getState() < 0 ){
+ mongo::log(1) << "note: not profiling because recursive read lock" << endl;
+ }
+ else {
+ mongolock lk(true);
+ if ( dbHolder.isLoaded( nsToDatabase( currentOp.getNS() ) , dbpath ) ){
+ Client::Context c( currentOp.getNS() );
+ profile(ss.str().c_str(), ms);
}
else {
- string old_ns = c.ns();
- Database * old_db = c.database();
- lk.releaseAndWriteLock();
- Client::Context c( old_ns , old_db );
- profile(ss.str().c_str(), ms);
+ mongo::log() << "note: not profiling because db went away - probably a close on: " << currentOp.getNS() << endl;
}
}
}
- currentOp.setActive(false);
return true;
} /* assembleResponse() */
@@ -452,7 +375,7 @@ namespace mongo {
int *x = (int *) m.data->_data;
x++; // reserved
int n = *x++;
- assert( n >= 1 );
+ uassert( 13004 , "sent 0 cursors to kill" , n >= 1 );
if ( n > 2000 ) {
problem() << "Assertion failure, receivedKillCursors, n=" << n << endl;
assert( n < 30000 );
@@ -460,29 +383,34 @@ namespace mongo {
killCursors(n, (long long *) x);
}
- /* cl - database name
+ /* db - database name
path - db directory
*/
- void closeDatabase( const char *cl, const string& path ) {
- Database *database = cc().database();
- assert( database );
- assert( database->name == cl );
- /*
- if ( string("local") != cl ) {
- DBInfo i(cl);
- i.dbDropped();
- }*/
+ void closeDatabase( const char *db, const string& path ) {
+ assertInWriteLock();
+
+ Client::Context * ctx = cc().getContext();
+ assert( ctx );
+ assert( ctx->inDB( db , path ) );
+ Database *database = ctx->db();
+ assert( database->name == db );
+
+ replCheckCloseDatabase( database );
+
+ if( BackgroundOperation::inProgForDb(db) ) {
+ log() << "warning: bg op in prog during close db? " << db << endl;
+ }
/* important: kill all open cursors on the database */
- string prefix(cl);
+ string prefix(db);
prefix += '.';
ClientCursor::invalidate(prefix.c_str());
NamespaceDetailsTransient::clearForPrefix( prefix.c_str() );
- dbHolder.erase( cl, path );
+ dbHolder.erase( db, path );
delete database; // closes files
- cc().clearns();
+ ctx->clear();
}
void receivedUpdate(Message& m, CurOp& op) {
@@ -490,9 +418,6 @@ namespace mongo {
const char *ns = d.getns();
assert(*ns);
uassert( 10054 , "not master", isMasterNs( ns ) );
- setClient(ns);
- Client& client = cc();
- client.top.setWrite();
op.debug().str << ns << ' ';
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
@@ -507,13 +432,18 @@ namespace mongo {
bool multi = flags & UpdateOption_Multi;
{
string s = query.toString();
- /* todo: we shouldn't do all this ss stuff when we don't need it, it will slow us down. */
+ /* todo: we shouldn't do all this ss stuff when we don't need it, it will slow us down.
+ instead, let's just store the query BSON in the debug object, and it can toString()
+ lazily
+ */
op.debug().str << " query: " << s;
- CurOp& currentOp = *client.curop();
- currentOp.setQuery(query);
+ op.setQuery(query);
}
+
+ mongolock lk(1);
+ Client::Context ctx( ns );
+
UpdateResult res = updateObjects(ns, toupdate, query, upsert, multi, true, op.debug() );
- /* TODO FIX: recordUpdate should take a long int for parm #2 */
recordUpdate( res.existing , (int) res.num ); // for getlasterror
}
@@ -522,9 +452,6 @@ namespace mongo {
const char *ns = d.getns();
assert(*ns);
uassert( 10056 , "not master", isMasterNs( ns ) );
- setClient(ns);
- Client& client = cc();
- client.top.setWrite();
int flags = d.pullInt();
bool justOne = flags & 1;
assert( d.moreJSObjs() );
@@ -532,35 +459,38 @@ namespace mongo {
{
string s = pattern.toString();
op.debug().str << " query: " << s;
- CurOp& currentOp = *client.curop();
- currentOp.setQuery(pattern);
+ op.setQuery(pattern);
}
- int n = deleteObjects(ns, pattern, justOne, true);
- recordDelete( n );
+
+ writelock lk(ns);
+ Client::Context ctx(ns);
+
+ long long n = deleteObjects(ns, pattern, justOne, true);
+ recordDelete( (int) n );
}
QueryResult* emptyMoreResult(long long);
bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop ) {
+ StringBuilder& ss = curop.debug().str;
bool ok = true;
+
DbMessage d(m);
+
const char *ns = d.getns();
- StringBuilder& ss = curop.debug().str;
- ss << ns;
- setClient(ns);
- cc().top.setRead();
int ntoreturn = d.pullInt();
long long cursorid = d.pullInt64();
- ss << " cid:" << cursorid;
- ss << " ntoreturn:" << ntoreturn;
+
+ ss << ns << " cid:" << cursorid << " ntoreturn:" << ntoreturn;
+
QueryResult* msgdata;
try {
- AuthenticationInfo *ai = currentClient.get()->ai;
- uassert( 10057 , "unauthorized", ai->isAuthorized(cc().database()->name.c_str()));
+ mongolock lk(false);
+ Client::Context ctx(ns);
msgdata = getMore(ns, ntoreturn, cursorid, curop);
}
catch ( AssertionException& e ) {
- ss << " exception " + e.toString();
+ ss << " exception " << e.toString();
msgdata = emptyMoreResult(cursorid);
ok = false;
}
@@ -570,7 +500,7 @@ namespace mongo {
ss << " nreturned:" << msgdata->nReturned;
dbresponse.response = resp;
dbresponse.responseTo = m.data->id;
- //dbMsgPort.reply(m, resp);
+
return ok;
}
@@ -579,10 +509,10 @@ namespace mongo {
const char *ns = d.getns();
assert(*ns);
uassert( 10058 , "not master", isMasterNs( ns ) );
- setClient(ns);
- cc().top.setWrite();
op.debug().str << ns;
-
+
+ writelock lk(ns);
+ Client::Context ctx(ns);
while ( d.moreJSObjs() ) {
BSONObj js = d.nextJsObj();
uassert( 10059 , "object to insert too large", js.objsize() <= MaxBSONObjectSize);
@@ -610,14 +540,21 @@ namespace mongo {
boost::filesystem::path path( dbpath );
for ( boost::filesystem::directory_iterator i( path );
i != boost::filesystem::directory_iterator(); ++i ) {
- string fileName = boost::filesystem::path(*i).leaf();
- if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
- names.push_back( fileName.substr( 0, fileName.length() - 3 ) );
+ if ( directoryperdb ) {
+ boost::filesystem::path p = *i;
+ string dbName = p.leaf();
+ p /= ( dbName + ".ns" );
+ if ( boost::filesystem::exists( p ) )
+ names.push_back( dbName );
+ } else {
+ string fileName = boost::filesystem::path(*i).leaf();
+ if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
+ names.push_back( fileName.substr( 0, fileName.length() - 3 ) );
+ }
}
}
bool DBDirectClient::call( Message &toSend, Message &response, bool assertOk ) {
- SavedContext c;
if ( lastError._get() )
lastError.startRequest( toSend, lastError._get() );
DbResponse dbResponse;
@@ -628,7 +565,6 @@ namespace mongo {
}
void DBDirectClient::say( Message &toSend ) {
- SavedContext c;
if ( lastError._get() )
lastError.startRequest( toSend, lastError._get() );
DbResponse dbResponse;
@@ -646,15 +582,13 @@ namespace mongo {
}
- DBDirectClient::AlwaysAuthorized DBDirectClient::SavedContext::always;
-
DBClientBase * createDirectClient(){
return new DBDirectClient();
}
void recCacheCloseAll();
- boost::mutex &exitMutex( *( new boost::mutex ) );
+ mongo::mutex exitMutex;
int numExitCalls = 0;
void shutdown();
@@ -680,8 +614,9 @@ namespace mongo {
/* not using log() herein in case we are already locked */
void dbexit( ExitCode rc, const char *why) {
+ Client * c = currentClient.get();
{
- boostlock lk( exitMutex );
+ scoped_lock lk( exitMutex );
if ( numExitCalls++ > 0 ) {
if ( numExitCalls > 5 ){
// this means something horrible has happened
@@ -690,6 +625,7 @@ namespace mongo {
stringstream ss;
ss << "dbexit: " << why << "; exiting immediately" << endl;
tryToOutputFatal( ss.str() );
+ if ( c ) c->shutdown();
::exit( rc );
}
}
@@ -706,12 +642,12 @@ namespace mongo {
}
tryToOutputFatal( "dbexit: really exiting now\n" );
+ if ( c ) c->shutdown();
::exit(rc);
}
void shutdown() {
-
log() << "\t shutdown: going to close listening sockets..." << endl;
ListeningSockets::get()->closeAll();
@@ -751,10 +687,29 @@ namespace mongo {
void acquirePathLock() {
#if !defined(_WIN32) && !defined(__sunos__)
string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
- lockFile = open( name.c_str(), O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO );
- massert( 10309 , "Unable to create / open lock file for dbpath: " + name, lockFile > 0 );
- massert( 10310 , "Unable to acquire lock for dbpath: " + name, flock( lockFile, LOCK_EX | LOCK_NB ) == 0 );
+
+ bool oldFile = false;
+
+ if ( boost::filesystem::exists( name ) && boost::filesystem::file_size( name ) > 0 ){
+ oldFile = true;
+ }
+ lockFile = open( name.c_str(), O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO );
+ uassert( 10309 , "Unable to create / open lock file for dbpath: " + name, lockFile > 0 );
+ uassert( 10310 , "Unable to acquire lock for dbpath: " + name, flock( lockFile, LOCK_EX | LOCK_NB ) == 0 );
+
+ if ( oldFile ){
+ // we check this here because we want to see if we can get the lock
+ // if we can't, then it's probably just another mongod running
+ cout << "************** \n"
+ << "old lock file: " << name << ". probably means unclean shutdown\n"
+ << "recommend removing file and running --repair\n"
+ << "see: http://dochub.mongodb.org/core/repair for more information\n"
+ << "**************" << endl;
+ uassert( 12596 , "old lock file" , 0 );
+ }
+
+
stringstream ss;
ss << getpid() << endl;
string s = ss.str();
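
(A standalone sketch of the flock-based dbpath lock adopted above, assuming POSIX; the path and error handling here are illustrative only. LOCK_NB makes a second process fail fast instead of blocking, which is why a failed flock most likely just means another mongod is running — and why the non-empty old lock file can be reported as a probable unclean shutdown only after the lock is won.)

    // sketch: single-instance lock via open + flock
    #include <sys/file.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        int fd = open( "/tmp/example.lock", O_RDWR | O_CREAT, 0644 );
        if ( fd < 0 ) { perror( "open" ); return 1; }
        if ( flock( fd, LOCK_EX | LOCK_NB ) != 0 ) {
            perror( "flock" );   // lock held elsewhere: fail fast, don't block
            return 1;
        }
        // ... hold the lock for the life of the process ...
        flock( fd, LOCK_UN );
        close( fd );
        return 0;
    }
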
diff --git a/db/instance.h b/db/instance.h
index b2b2c94..b545a78 100644
--- a/db/instance.h
+++ b/db/instance.h
@@ -38,12 +38,14 @@ namespace mongo {
7 = log a few reads, and all writes.
*/
int level;
+ mongo::mutex mutex;
+
DiagLog() : f(0) , level(0) { }
void init() {
if ( ! f && level ){
log() << "diagLogging = " << level << endl;
stringstream ss;
- ss << "diaglog." << hex << time(0);
+ ss << dbpath << "/diaglog." << hex << time(0);
string name = ss.str();
f = new ofstream(name.c_str(), ios::out | ios::binary);
if ( ! f->good() ) {
@@ -62,17 +64,26 @@ namespace mongo {
return old;
}
void flush() {
- if ( level ) f->flush();
+ if ( level ){
+ scoped_lock lk(mutex);
+ f->flush();
+ }
}
void write(char *data,int len) {
- if ( level & 1 ) f->write(data,len);
+ if ( level & 1 ){
+ scoped_lock lk(mutex);
+ f->write(data,len);
+ }
}
void readop(char *data, int len) {
if ( level & 2 ) {
bool log = (level & 4) == 0;
OCCASIONALLY log = true;
- if ( log )
+ if ( log ){
+ scoped_lock lk(mutex);
+ assert( f );
f->write(data,len);
+ }
}
}
};
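
(The change above serializes every touch of the shared ofstream behind one mutex. A standalone sketch of the same pattern, with std::mutex standing in for the mongo::mutex / scoped_lock pair — an assumption for illustration only:)

    // sketch: one mutex guards every touch of a shared stream
    #include <fstream>
    #include <mutex>

    class GuardedLog {
        std::ofstream _f;
        std::mutex _m;
    public:
        explicit GuardedLog( const char* path )
            : _f( path, std::ios::out | std::ios::binary ) {}
        void write( const char* data, int len ) {
            std::lock_guard<std::mutex> lk( _m );   // one writer at a time
            _f.write( data, len );
        }
        void flush() {
            std::lock_guard<std::mutex> lk( _m );
            _f.flush();
        }
    };
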
@@ -124,53 +135,6 @@ namespace mongo {
// don't need to piggy back when connected locally
return say( toSend );
}
- class AlwaysAuthorized : public AuthenticationInfo {
- virtual bool isAuthorized( const char *dbname ) {
- return true;
- }
- };
-
- /* TODO: this looks bad that auth is set to always. is that really always safe? */
- class SavedContext {
- public:
- SavedContext() {
- _save = dbMutex.atLeastReadLocked();
-
- Client *c = currentClient.get();
- oldAuth = c->ai;
- // careful, don't want to free this:
- c->ai = &always;
-
- /* it only makes sense to manipulate a pointer - c->database() - if locked.
- thus the _saved flag.
- */
- if( _save ) {
- if ( c->database() ) {
- dbMutex.assertAtLeastReadLocked();
- _oldName = c->database()->name;
- }
- }
- }
- ~SavedContext() {
- Client *c = currentClient.get();
- c->ai = oldAuth;
- if( _save ) {
- if ( !_oldName.empty() ) {
- dbMutex.assertAtLeastReadLocked();
- setClient( _oldName.c_str() );
- }
- }
- else {
- // defensive
- cc().clearns();
- }
- }
- private:
- bool _save;
- static AlwaysAuthorized always;
- AuthenticationInfo *oldAuth;
- string _oldName;
- };
};
extern int lockFile;
diff --git a/db/introspect.cpp b/db/introspect.cpp
index 9cb477d..a041d48 100644
--- a/db/introspect.cpp
+++ b/db/introspect.cpp
@@ -26,8 +26,7 @@
namespace mongo {
- void profile(const char *str,
- int millis)
+ void profile( const char *str, int millis)
{
BSONObjBuilder b;
b.appendDate("ts", jsTime());
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index 1a299a5..9f9a684 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -20,6 +20,7 @@
#include "stdafx.h"
#include "jsobj.h"
#include "nonce.h"
+#include "../util/atomic_int.h"
#include "../util/goodies.h"
#include "../util/base64.h"
#include "../util/md5.hpp"
@@ -30,6 +31,7 @@
#include "jsobjmanipulator.h"
#include "../util/optime.h"
#include <boost/static_assert.hpp>
+#include <boost/any.hpp>
#undef assert
#define assert xassert
@@ -50,12 +52,6 @@ namespace mongo {
}
IDLabeler GENOID;
- BSONObjBuilder& operator<<(BSONObjBuilder& b, IDLabeler& id) {
- OID oid;
- oid.init();
- b.appendOID("_id", &oid);
- return b;
- }
DateNowLabeler DATENOW;
@@ -156,7 +152,7 @@ namespace mongo {
return s.str();
}
- string escape( string s ) {
+ string escape( string s , bool escape_slash=false) {
stringstream ret;
for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
switch ( *i ) {
@@ -167,7 +163,7 @@ namespace mongo {
ret << "\\\\";
break;
case '/':
- ret << "\\/";
+ ret << (escape_slash ? "\\/" : "/");
break;
case '\b':
ret << "\\b";
@@ -306,17 +302,13 @@ namespace mongo {
s << " )";
break;
case RegEx:
- if ( format == Strict )
- s << "{ \"$regex\" : \"";
- else
- s << "/";
- s << escape( regex() );
- if ( format == Strict )
+ if ( format == Strict ){
+ s << "{ \"$regex\" : \"" << escape( regex() );
s << "\", \"$options\" : \"" << regexFlags() << "\" }";
- else {
- s << "/";
+ } else {
+ s << "/" << escape( regex() , true ) << "/";
// FIXME Worry about alpha order?
- for ( const char *f = regexFlags(); *f; ++f )
+ for ( const char *f = regexFlags(); *f; ++f ){
switch ( *f ) {
case 'g':
case 'i':
@@ -325,6 +317,7 @@ namespace mongo {
default:
break;
}
+ }
}
break;
@@ -413,7 +406,8 @@ namespace mongo {
default: {
stringstream ss;
ss << "BSONElement: bad type " << (int) type();
- massert( 10320 , ss.str().c_str(),false);
+ string msg = ss.str();
+ massert( 10320 , msg.c_str(),false);
}
}
totalSize = x + fieldNameSize() + 1; // BSONType
@@ -434,8 +428,12 @@ namespace mongo {
else if ( fn[3] == 'e' && fn[4] == 0 ) return BSONObj::LTE;
}
}
- else if ( fn[1] == 'n' && fn[2] == 'e' && fn[3] == 0)
- return BSONObj::NE;
+ else if ( fn[1] == 'n' && fn[2] == 'e' ){
+ if ( fn[3] == 0 )
+ return BSONObj::NE;
+ if ( fn[3] == 'a' && fn[4] == 'r' && fn[5] == 0 )
+ return BSONObj::opNEAR;
+ }
else if ( fn[1] == 'm' && fn[2] == 'o' && fn[3] == 'd' && fn[4] == 0 )
return BSONObj::opMOD;
else if ( fn[1] == 't' && fn[2] == 'y' && fn[3] == 'p' && fn[4] == 'e' && fn[5] == 0 )
@@ -458,6 +456,8 @@ namespace mongo {
return BSONObj::opREGEX;
else if ( fn[1] == 'o' && fn[2] == 'p' && fn[3] == 't' && fn[4] == 'i' && fn[5] == 'o' && fn[6] == 'n' && fn[7] == 's' && fn[8] == 0 )
return BSONObj::opOPTIONS;
+ else if ( fn[1] == 'w' && fn[2] == 'i' && fn[3] == 't' && fn[4] == 'h' && fn[5] == 'i' && fn[6] == 'n' && fn[7] == 0 )
+ return BSONObj::opWITHIN;
}
return def;
}
@@ -541,13 +541,18 @@ namespace mongo {
case Object:
case Array:
return l.embeddedObject().woCompare( r.embeddedObject() );
- case DBRef:
- case BinData: {
+ case DBRef: {
int lsz = l.valuesize();
int rsz = r.valuesize();
if ( lsz - rsz != 0 ) return lsz - rsz;
return memcmp(l.value(), r.value(), lsz);
}
+ case BinData: {
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int rsz = r.objsize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value()+4, r.value()+4, lsz+1);
+ }
case RegEx:
{
int c = strcmp(l.regex(), r.regex());
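
(The BinData value is laid out as <int32 len><byte subtype><byte[len] data>; value()+4 skips the length, so comparing lsz+1 bytes covers the subtype byte plus the payload. The same comparison over raw buffers, as a standalone sketch:)

    // sketch: ordering two BinData values, layout <int32 len><byte subtype><data>
    #include <cstring>
    #include <cstdint>

    int binDataCompare( const char* l, const char* r ) {
        int32_t lsz, rsz;
        memcpy( &lsz, l, 4 );
        memcpy( &rsz, r, 4 );
        if ( lsz != rsz ) return lsz - rsz;       // shorter data sorts first
        return memcmp( l + 4, r + 4, lsz + 1 );   // subtype byte + payload
    }
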
@@ -576,31 +581,35 @@ namespace mongo {
void BSONElement::validate() const {
switch( type() ) {
- case DBRef:
- case Code:
- case Symbol:
- case String:
- massert( 10321 , "Invalid dbref/code/string/symbol size",
- valuestrsize() > 0 &&
- valuestrsize() - 1 == strnlen( valuestr(), valuestrsize() ) );
- break;
- case CodeWScope: {
- int totalSize = *( int * )( value() );
- massert( 10322 , "Invalid CodeWScope size", totalSize >= 8 );
- int strSizeWNull = *( int * )( value() + 4 );
- massert( 10323 , "Invalid CodeWScope string size", totalSize >= strSizeWNull + 4 + 4 );
- massert( 10324 , "Invalid CodeWScope string size",
- strSizeWNull > 0 &&
- strSizeWNull - 1 == strnlen( codeWScopeCode(), strSizeWNull ) );
- massert( 10325 , "Invalid CodeWScope size", totalSize >= strSizeWNull + 4 + 4 + 4 );
- int objSize = *( int * )( value() + 4 + 4 + strSizeWNull );
- massert( 10326 , "Invalid CodeWScope object size", totalSize == 4 + 4 + strSizeWNull + objSize );
- // Subobject validation handled elsewhere.
- }
- case Object:
- // We expect Object size validation to be handled elsewhere.
- default:
- break;
+ case DBRef:
+ case Code:
+ case Symbol:
+ case String: {
+ int x = valuestrsize();
+ if ( x > 0 && valuestr()[x-1] == 0 )
+ return;
+ StringBuilder buf;
+ buf << "Invalid dbref/code/string/symbol size: " << x << " strnlen:" << strnlen( valuestr() , x );
+ massert( 10321 , buf.str() , 0 );
+ break;
+ }
+ case CodeWScope: {
+ int totalSize = *( int * )( value() );
+ massert( 10322 , "Invalid CodeWScope size", totalSize >= 8 );
+ int strSizeWNull = *( int * )( value() + 4 );
+ massert( 10323 , "Invalid CodeWScope string size", totalSize >= strSizeWNull + 4 + 4 );
+ massert( 10324 , "Invalid CodeWScope string size",
+ strSizeWNull > 0 &&
+ strSizeWNull - 1 == strnlen( codeWScopeCode(), strSizeWNull ) );
+ massert( 10325 , "Invalid CodeWScope size", totalSize >= strSizeWNull + 4 + 4 + 4 );
+ int objSize = *( int * )( value() + 4 + 4 + strSizeWNull );
+ massert( 10326 , "Invalid CodeWScope object size", totalSize == 4 + 4 + strSizeWNull + objSize );
+ // Subobject validation handled elsewhere.
+ }
+ case Object:
+ // We expect Object size validation to be handled elsewhere.
+ default:
+ break;
}
}
@@ -653,7 +662,7 @@ namespace mongo {
const string& c = l.substr( lstart , lend - lstart );
const string& d = r.substr( rstart , rend - rstart );
- int x = c.compare( d );
+ int x = lexNumCmp( c.c_str(), d.c_str() );
if ( x < 0 )
return LEFT_BEFORE;
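
(lexNumCmp — from util/goodies.h — compares embedded digit runs numerically, so "a9" sorts before "a10" where plain strcmp would not. A minimal reimplementation of the idea, not the exact util function, ignoring leading zeros for brevity:)

    // sketch: lexicographic compare with numeric runs compared as numbers
    #include <cctype>
    #include <cstring>

    int lexNumCmpSketch( const char* a, const char* b ) {
        while ( *a && *b ) {
            if ( isdigit( (unsigned char)*a ) && isdigit( (unsigned char)*b ) ) {
                const char* ae = a; while ( isdigit( (unsigned char)*ae ) ) ae++;
                const char* be = b; while ( isdigit( (unsigned char)*be ) ) be++;
                if ( ae - a != be - b )              // longer digit run is larger
                    return ae - a < be - b ? -1 : 1;
                int c = strncmp( a, b, ae - a );     // equal length: compare digits
                if ( c ) return c;
                a = ae; b = be;
            }
            else {
                if ( *a != *b ) return *a < *b ? -1 : 1;
                a++; b++;
            }
        }
        return *a ? 1 : ( *b ? -1 : 0 );
    }
    // lexNumCmpSketch( "a9", "a10" ) < 0, while strcmp( "a9", "a10" ) > 0
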
@@ -766,9 +775,18 @@ namespace mongo {
if ( r.eoo() )
return 1;
- int x = l.woCompare( r, considerFieldName );
- if ( ordered && o.number() < 0 )
- x = -x;
+ int x;
+/*
+ if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
+ l.type() == String && r.type() == String ) {
+ // note: no negative support yet, as this is just sort of a POC
+ x = _stricmp(l.valuestr(), r.valuestr());
+ }
+ else*/ {
+ x = l.woCompare( r, considerFieldName );
+ if ( ordered && o.number() < 0 )
+ x = -x;
+ }
if ( x != 0 )
return x;
}
@@ -809,18 +827,6 @@ namespace mongo {
}
- BSONElement BSONObj::getField(const char *name) const {
- BSONObjIterator i(*this);
- while ( i.moreWithEOO() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- if ( strcmp(e.fieldName(), name) == 0 )
- return e;
- }
- return nullElement;
- }
-
/* return has eoo() true if no match
supports "." notation to reach into embedded objects
*/
@@ -838,49 +844,62 @@ namespace mongo {
return e;
}
- /* jul09 : 'deep' and this function will be going away in the future - kept only for backward compatibility of datafiles for now. */
- void trueDat( bool *deep ) {
- if( deep )
- *deep = true;
- }
+ void BSONObj::getFieldsDotted(const char *name, BSONElementSet &ret ) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ){
+ BSONElement e = i.next();
+ FieldCompareResult cmp = compareDottedFieldNames( name , e.fieldName() );
+ switch ( cmp ){
- void BSONObj::getFieldsDotted(const char *name, BSONElementSet &ret, bool *deep ) const {
- BSONElement e = getField( name );
- if ( e.eoo() ) {
- const char *p = strchr(name, '.');
- if ( p ) {
- string left(name, p-name);
- BSONElement e = getField( left );
- if ( e.type() == Array ) {
- trueDat( deep );
- BSONObjIterator i( e.embeddedObject() );
- while( i.moreWithEOO() ) {
- BSONElement f = i.next();
- if ( f.eoo() )
- break;
+ case LEFT_BEFORE:
+ case RIGHT_BEFORE:
+ break;
+
+ case RIGHT_SUBFIELD:
+ assert(0);
+ break;
+
+ case LEFT_SUBFIELD: {
+ const char * next = name + strlen( e.fieldName() ) + 1;
+ bool allDigits = false;
+ if ( isdigit( *next ) ){
+ const char * temp = next + 1;
+ while ( isdigit( *temp ) )
+ temp++;
+ allDigits = *temp == '.';
+ }
+
+ if ( e.type() == Object || allDigits ){
+ e.embeddedObject().getFieldsDotted( next , ret );
+ }
+ else if ( e.type() == Array ){
+ BSONObjIterator j( e.embeddedObject() );
+ while ( j.more() ){
+ BSONElement f = j.next();
if ( f.type() == Object )
- f.embeddedObject().getFieldsDotted(p+1, ret);
+ f.embeddedObject().getFieldsDotted( next , ret );
}
- } else if ( e.type() == Object ) {
- e.embeddedObject().getFieldsDotted(p+1, ret);
}
+ else {
+ // intentionally left blank: this means no match
+ }
+ return;
}
- } else {
- if ( e.type() == Array ) {
- trueDat( deep );
- BSONObjIterator i( e.embeddedObject() );
- while( i.moreWithEOO() ) {
- BSONElement f = i.next();
- if ( f.eoo() )
- break;
- ret.insert( f );
+
+ case SAME: {
+ if ( e.type() == Array ){
+ BSONObjIterator j( e.embeddedObject() );
+ while ( j.more() )
+ ret.insert( j.next() );
}
- } else {
- ret.insert( e );
+ else {
+ ret.insert( e );
+ }
+ return;
+ }
+
}
}
- if ( ret.empty() && deep )
- *deep = false;
}
BSONElement BSONObj::getFieldDottedOrArray(const char *&name) const {
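
(The rewritten traversal walks dotted names with compareDottedFieldNames and expands arrays at any depth. A hedged usage sketch against the new signature, assuming this tree's headers:)

    // sketch: dotted lookup expanding a multikey array
    #include "db/jsobj.h"
    #include <iostream>

    using namespace mongo;

    int main() {
        BSONObj o = BSON( "a" << BSON_ARRAY( BSON( "b" << 1 ) << BSON( "b" << 2 ) ) );
        BSONElementSet s;
        o.getFieldsDotted( "a.b", s );   // descends into the array: yields b:1 and b:2
        for ( BSONElementSet::const_iterator i = s.begin(); i != s.end(); ++i )
            std::cout << i->numberInt() << std::endl;
        return 0;
    }
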
@@ -1141,7 +1160,10 @@ namespace mongo {
if ( strchr( name , '.' ) ||
strchr( name , '$' ) ){
- return false;
+ return
+ strcmp( name , "$ref" ) == 0 ||
+ strcmp( name , "$id" ) == 0
+ ;
}
if ( e.mayEncapsulate() ){
@@ -1410,7 +1432,7 @@ namespace mongo {
}
void OID::init() {
- static WrappingInt inc = (unsigned) security.getNonce();
+ static AtomicUInt inc = (unsigned) security.getNonce();
unsigned t = (unsigned) time(0);
char *T = (char *) &t;
data[0] = T[3];
@@ -1420,7 +1442,7 @@ namespace mongo {
(unsigned&) data[4] = _machine;
- int new_inc = inc.atomicIncrement();
+ int new_inc = inc++;
T = (char *) &new_inc;
char * raw = (char*)&b;
raw[0] = T[3];
@@ -1464,7 +1486,7 @@ namespace mongo {
Labeler::Label SIZE( "$size" );
void BSONElementManipulator::initTimestamp() {
- massert( 10332 , "Expected CurrentTime type", element_.type() == Timestamp );
+ massert( 10332 , "Expected CurrentTime type", _element.type() == Timestamp );
unsigned long long &timestamp = *( reinterpret_cast< unsigned long long* >( value() ) );
if ( timestamp == 0 )
timestamp = OpTime::now().asDate();
@@ -1610,12 +1632,23 @@ namespace mongo {
}
+ void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ){
+ BSONObjIterator i(keyPattern);
+ BSONObjIterator j(values);
+
+ while ( i.more() && j.more() ){
+ appendAs( j.next() , i.next().fieldName() );
+ }
+
+ assert( ! i.more() );
+ assert( ! j.more() );
+ }
int BSONElementFieldSorter( const void * a , const void * b ){
const char * x = *((const char**)a);
const char * y = *((const char**)b);
x++; y++;
- return strcmp( x , y );
+ return lexNumCmp( x , y );
}
BSONObjIteratorSorted::BSONObjIteratorSorted( const BSONObj& o ){
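
(appendKeys pairs each name from the key pattern with the corresponding value, asserting both run out together — useful for rebuilding an index key document. A hedged usage sketch, assuming this tree's headers:)

    // sketch: rebuilding { a: ..., b: ... } from a key pattern and raw values
    #include "db/jsobj.h"

    using namespace mongo;

    int main() {
        BSONObj keyPattern = BSON( "a" << 1 << "b" << 1 );
        BSONObj values = BSON( "" << 5 << "" << "x" );   // field names are ignored
        BSONObjBuilder b;
        b.appendKeys( keyPattern, values );
        BSONObj key = b.obj();                           // { a: 5, b: "x" }
        return 0;
    }
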
diff --git a/db/jsobj.h b/db/jsobj.h
index 4030122..aaf059b 100644
--- a/db/jsobj.h
+++ b/db/jsobj.h
@@ -480,7 +480,7 @@ namespace mongo {
BSONObj embeddedObject() const;
/* uasserts if not an object */
- BSONObj embeddedObjectUserCheck();
+ BSONObj embeddedObjectUserCheck() const;
BSONObj codeWScopeObject() const;
@@ -509,7 +509,7 @@ namespace mongo {
BinDataType binDataType() const {
// BinData: <int len> <byte subtype> <byte[len] data>
assert( type() == BinData );
- char c = (value() + 4)[0];
+ unsigned char c = (value() + 4)[0];
return (BinDataType)c;
}
@@ -574,9 +574,25 @@ namespace mongo {
/** True if this element may contain subobjects. */
bool mayEncapsulate() const {
- return type() == Object ||
- type() == Array ||
- type() == CodeWScope;
+ switch ( type() ){
+ case Object:
+ case Array:
+ case CodeWScope:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /** True if this element can be a BSONObj */
+ bool isABSONObj() const {
+ switch( type() ){
+ case Object:
+ case Array:
+ return true;
+ default:
+ return false;
+ }
}
Date_t timestampTime() const{
@@ -625,7 +641,7 @@ namespace mongo {
mutable int fieldNameSize_; // cached value
int fieldNameSize() const {
if ( fieldNameSize_ == -1 )
- fieldNameSize_ = strlen( fieldName() ) + 1;
+ fieldNameSize_ = (int)strlen( fieldName() ) + 1;
return fieldNameSize_;
}
mutable int totalSize; /* caches the computed size */
@@ -635,7 +651,7 @@ namespace mongo {
struct BSONElementCmpWithoutField {
bool operator()( const BSONElement &l, const BSONElement &r ) const {
- return l.woCompare( r, false );
+ return l.woCompare( r, false ) < 0;
}
};
@@ -700,6 +716,11 @@ namespace mongo {
if ( ! isValid() ){
stringstream ss;
ss << "Invalid BSONObj spec size: " << objsize();
+ try {
+ BSONElement e = firstElement();
+ ss << " first element:" << e.toString() << " ";
+ }
+ catch ( ... ){}
string s = ss.str();
massert( 10334 , s , 0 );
}
@@ -759,7 +780,7 @@ namespace mongo {
BSONElement getFieldDotted(const char *name) const;
/** Like getFieldDotted(), but expands multikey arrays and returns all matching objects
*/
- void getFieldsDotted(const char *name, BSONElementSet &ret, bool *deep = 0) const;
+ void getFieldsDotted(const char *name, BSONElementSet &ret ) const;
/** Like getFieldDotted(), but returns first array encountered while traversing the
dotted fields of name. The name variable is updated to represent field
names with respect to the returned element. */
@@ -768,14 +789,14 @@ namespace mongo {
/** Get the field of the specified name. eoo() is true on the returned
element if not found.
*/
- BSONElement getField(const string name) const {
- return getField( name.c_str() );
- };
+ BSONElement getField(const char *name) const;
/** Get the field of the specified name. eoo() is true on the returned
element if not found.
*/
- BSONElement getField(const char *name) const; /* return has eoo() true if no match */
+ BSONElement getField(const string name) const {
+ return getField( name.c_str() );
+ };
/** Get the field of the specified name. eoo() is true on the returned
element if not found.
@@ -902,13 +923,9 @@ namespace mongo {
return BSONElement(objdata() + 4);
}
- /** @return element with fieldname "name". returnvalue.eoo() is true if not found */
- BSONElement findElement(const char *name) const;
-
- /** @return element with fieldname "name". returnvalue.eoo() is true if not found */
- BSONElement findElement(string name) const {
- return findElement(name.c_str());
- }
+ /** use getField() instead. */
+ //BSONElement getField(const char *name) const;
+ //BSONElement getField(string name) const {
/** @return true if field exists in the object */
bool hasElement(const char *name) const;
@@ -976,7 +993,9 @@ namespace mongo {
opTYPE = 0x0F,
opREGEX = 0x10,
opOPTIONS = 0x11,
- opELEM_MATCH = 0x12
+ opELEM_MATCH = 0x12,
+ opNEAR = 0x13,
+ opWITHIN = 0x14,
};
};
ostream& operator<<( ostream &s, const BSONObj &o );
@@ -1028,7 +1047,7 @@ namespace mongo {
BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 ) produces the object
{ a: { \$gt: 23.4, \$ne: 30 }, b: 2 }.
*/
-#define BSON(x) (( mongo::BSONObjBuilder() << x ).obj())
+#define BSON(x) (( mongo::BSONObjBuilder(64) << x ).obj())
/** Use BSON_ARRAY macro like BSON macro, but without keys
@@ -1042,7 +1061,6 @@ namespace mongo {
cout << BSON( GENOID << "z" << 3 ); // { _id : ..., z : 3 }
*/
extern struct IDLabeler { } GENOID;
- BSONObjBuilder& operator<<(BSONObjBuilder& b, IDLabeler& id);
/* Utility class to add a Date element with the current time
Example:
@@ -1107,20 +1125,63 @@ namespace mongo {
};
/**
+ used in conjunction with BSONObjBuilder; tracks recent object sizes so the builder can pick a sensible initial buffer size and avoid excessive memory usage
+ */
+ class BSONSizeTracker {
+ public:
+#define BSONSizeTrackerSize 10
+
+ BSONSizeTracker(){
+ _pos = 0;
+ for ( int i=0; i<BSONSizeTrackerSize; i++ )
+ _sizes[i] = 512; // this is the default, so just be consistent
+ }
+
+ ~BSONSizeTracker(){
+ }
+
+ void got( int size ){
+ _sizes[_pos++] = size;
+ if ( _pos >= BSONSizeTrackerSize )
+ _pos = 0;
+ }
+
+ /**
+ * right now choosing largest size
+ */
+ int getSize() const {
+ int x = 16; // sane min
+ for ( int i=0; i<BSONSizeTrackerSize; i++ ){
+ if ( _sizes[i] > x )
+ x = _sizes[i];
+ }
+ return x;
+ }
+
+ private:
+ int _pos;
+ int _sizes[BSONSizeTrackerSize];
+ };
+
+ /**
utility for creating a BSONObj
*/
class BSONObjBuilder : boost::noncopyable {
public:
/** @param initsize this is just a hint as to the final size of the object */
- BSONObjBuilder(int initsize=512) : b(buf_), buf_(initsize), offset_( 0 ), s_( this ) {
+ BSONObjBuilder(int initsize=512) : b(buf_), buf_(initsize), offset_( 0 ), s_( this ) , _tracker(0) {
b.skip(4); /*leave room for size field*/
}
/** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder */
- BSONObjBuilder( BufBuilder &baseBuilder ) : b( baseBuilder ), buf_( 0 ), offset_( baseBuilder.len() ), s_( this ) {
+ BSONObjBuilder( BufBuilder &baseBuilder ) : b( baseBuilder ), buf_( 0 ), offset_( baseBuilder.len() ), s_( this ) , _tracker(0) {
b.skip( 4 );
}
+ BSONObjBuilder( const BSONSizeTracker & tracker ) : b(buf_) , buf_(tracker.getSize() ), offset_(0), s_( this ) , _tracker( (BSONSizeTracker*)(&tracker) ){
+ b.skip( 4 );
+ }
+
/** add all the fields from the object specified to this object */
BSONObjBuilder& appendElements(BSONObj x);
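
(Tying the pieces together: a caller that builds many similar objects hands the same tracker to each builder, so _done() feeds every finished size back through got() and the next buffer starts near the right size. A hedged sketch, assuming this tree's headers:)

    // sketch: sizing BSONObjBuilder buffers from recent history
    #include "db/jsobj.h"

    using namespace mongo;

    void buildMany( BSONSizeTracker& tracker ) {
        for ( int i = 0; i < 100; i++ ) {
            BSONObjBuilder b( tracker );   // initial buffer sized via tracker.getSize()
            b.append( "i", i );
            b.append( "payload", "roughly the same content every time" );
            BSONObj o = b.obj();           // _done() reports the final size to the tracker
            // ... use o ...
        }
    }
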
@@ -1188,6 +1249,13 @@ namespace mongo {
b.append((char) (val?1:0));
}
+ /** Append a boolean element */
+ void append(const char *fieldName, bool val) {
+ b.append((char) Bool);
+ b.append(fieldName);
+ b.append((char) (val?1:0));
+ }
+
/** Append a 32 bit integer element */
void append(const char *fieldName, int n) {
b.append((char) NumberInt);
@@ -1214,7 +1282,42 @@ namespace mongo {
append( fieldName.c_str() , n );
}
+ /** appends a number. if |n| < max(int)/2 then uses int, otherwise long long */
+ void appendIntOrLL( const string& fieldName , long long n ){
+ long long x = n;
+ if ( x < 0 )
+ x = x * -1;
+ if ( x < ( numeric_limits<int>::max() / 2 ) )
+ append( fieldName.c_str() , (int)n );
+ else
+ append( fieldName.c_str() , n );
+ }
+
+
+ /**
+ * appendNumber is a family of methods for appending the smallest sensible numeric type
+ * mostly for JS
+ */
+ void appendNumber( const string& fieldName , int n ){
+ append( fieldName.c_str() , n );
+ }
+ void appendNumber( const string& fieldName , double d ){
+ append( fieldName.c_str() , d );
+ }
+
+ void appendNumber( const string& fieldName , long long l ){
+ static long long maxInt = (int)pow( 2.0 , 30.0 );
+ static long long maxDouble = (long long)pow( 2.0 , 40.0 );
+
+ if ( l < maxInt )
+ append( fieldName.c_str() , (int)l );
+ else if ( l < maxDouble )
+ append( fieldName.c_str() , (double)l );
+ else
+ append( fieldName.c_str() , l );
+ }
+
/** Append a double element */
BSONObjBuilder& append(const char *fieldName, double n) {
b.append((char) NumberDouble);
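
(Given the thresholds above, appendNumber stores anything below 2^30 as a NumberInt, below 2^40 as a NumberDouble — still exactly representable — and larger values as a NumberLong. A hedged sketch, assuming this tree's headers:)

    // sketch: smallest-sensible-type selection
    #include "db/jsobj.h"

    using namespace mongo;

    int main() {
        BSONObjBuilder b;
        b.appendNumber( "small", (long long)1000 );           // stored as NumberInt
        b.appendNumber( "medium", (long long)( 1LL << 35 ) ); // stored as NumberDouble
        b.appendNumber( "large", (long long)( 1LL << 50 ) );  // stored as NumberLong
        BSONObj o = b.obj();
        return 0;
    }
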
@@ -1451,6 +1554,16 @@ namespace mongo {
return BSONObj(_done());
}
+ /** Peek at what is in the builder, but leave the builder ready for more appends.
+ The returned object is only valid until the next modification or destruction of the builder.
+ Intended use case: append a field if not already there.
+ */
+ BSONObj asTempObj() {
+ BSONObj temp(_done());
+ b.setlen(b.len()-1); //next append should overwrite the EOO
+ return temp;
+ }
+
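
(The intended use — append a field only if it is not already present — looks like this; a hedged sketch assuming this tree's headers. The temp object is only valid until the next append:)

    // sketch: conditional append via asTempObj
    #include "db/jsobj.h"

    using namespace mongo;

    void ensureId( BSONObjBuilder& b ) {
        BSONObj temp = b.asTempObj();            // peek without finishing the builder
        if ( temp.getField( "_id" ).eoo() ) {    // _id not there yet?
            OID oid;
            oid.init();
            b.appendOID( "_id", &oid );          // safe: next append overwrites the EOO
        }
    }
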
/* assume ownership of the buffer - you must then free it (with free()) */
char* decouple(int& l) {
char *x = _done();
@@ -1463,6 +1576,7 @@ namespace mongo {
b.decouple(); // post done() call version. be sure jsobj frees...
}
+ void appendKeys( const BSONObj& keyPattern , const BSONObj& values );
private:
static const string numStrs[100]; // cache of 0 to 99 inclusive
@@ -1482,6 +1596,14 @@ namespace mongo {
return s_;
}
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilder& operator<<( IDLabeler ) {
+ OID oid;
+ oid.init();
+ appendOID("_id", &oid);
+ return *this;
+ }
+
// prevent implicit string conversions which would allow bad things like BSON( BSON( "foo" << 1 ) << 2 )
struct ForceExplicitString {
ForceExplicitString( const string &str ) : str_( str ) {}
@@ -1509,12 +1631,15 @@ namespace mongo {
b.append( fieldName );
b.append( (void *) arr.objdata(), arr.objsize() );
}
-
+
char* _done() {
s_.endField();
b.append((char) EOO);
char *data = b.buf() + offset_;
- *((int*)data) = b.len() - offset_;
+ int size = b.len() - offset_;
+ *((int*)data) = size;
+ if ( _tracker )
+ _tracker->got( size );
return data;
}
@@ -1522,34 +1647,88 @@ namespace mongo {
BufBuilder buf_;
int offset_;
BSONObjBuilderValueStream s_;
+ BSONSizeTracker * _tracker;
};
class BSONArrayBuilder : boost::noncopyable{
public:
- BSONArrayBuilder() :i(0), b() {}
+ BSONArrayBuilder() : _i(0), _b() {}
+ BSONArrayBuilder( BufBuilder &b ) : _i(0), _b(b) {}
template <typename T>
BSONArrayBuilder& append(const T& x){
- b.append(num().c_str(), x);
+ _b.append(num().c_str(), x);
return *this;
}
BSONArrayBuilder& append(const BSONElement& e){
- b.appendAs(e, num().c_str());
+ _b.appendAs(e, num().c_str());
return *this;
}
-
+
template <typename T>
BSONArrayBuilder& operator<<(const T& x){
return append(x);
}
+
+ void appendNull() {
+ _b.appendNull(num().c_str());
+ }
- BSONArray arr(){ return BSONArray(b.obj()); }
+ BSONArray arr(){ return BSONArray(_b.obj()); }
+
+ BSONObj done() { return _b.done(); }
+
+ template <typename T>
+ BSONArrayBuilder& append(const char *name, const T& x){
+ fill( name );
+ append( x );
+ return *this;
+ }
+
+ BufBuilder &subobjStart( const char *name ) {
+ fill( name );
+ return _b.subobjStart( num().c_str() );
+ }
+ BufBuilder &subarrayStart( const char *name ) {
+ fill( name );
+ return _b.subarrayStart( num().c_str() );
+ }
+
+ void appendArray( const char *name, BSONObj subObj ) {
+ fill( name );
+ _b.appendArray( num().c_str(), subObj );
+ }
+
+ void appendAs( const BSONElement &e, const char *name ) {
+ fill( name );
+ append( e );
+ }
+
private:
- string num(){ return b.numStr(i++); }
- int i;
- BSONObjBuilder b;
+ void fill( const char *name ) {
+ char *r;
+ int n = strtol( name, &r, 10 );
+ uassert( 13048, "can't append to array using string field name", !*r );
+ while( _i < n )
+ append( nullElt() );
+ }
+
+ static BSONElement nullElt() {
+ static BSONObj n = nullObj();
+ return n.firstElement();
+ }
+
+ static BSONObj nullObj() {
+ BSONObjBuilder b;
+ b.appendNull( "" );
+ return b.obj();
+ }
+
+ string num(){ return _b.numStr(_i++); }
+ int _i;
+ BSONObjBuilder _b;
};
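
(The new fill() machinery lets callers append at a named numeric position, null-padding any gap first. A hedged usage sketch, assuming this tree's headers:)

    // sketch: positional appends with null padding
    #include "db/jsobj.h"

    using namespace mongo;

    int main() {
        BSONArrayBuilder b;
        b.append( 1 );          // index 0
        b.append( "3", 4 );     // fill() null-pads indexes 1 and 2 first
        BSONArray a = b.arr();  // [ 1, null, null, 4 ]
        return 0;
    }
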
@@ -1584,8 +1763,8 @@ namespace mongo {
/** @return the next element in the object. For the final element, element.eoo() will be true. */
BSONElement next( bool checkEnd = false ) {
assert( pos < theend );
- BSONElement e( pos, checkEnd ? theend - pos : -1 );
- pos += e.size( checkEnd ? theend - pos : -1 );
+ BSONElement e( pos, checkEnd ? (int)(theend - pos) : -1 );
+ pos += e.size( checkEnd ? (int)(theend - pos) : -1 );
return e;
}
private:
@@ -1653,13 +1832,13 @@ namespace mongo {
#define CHECK_OBJECT( o , msg )
#endif
- inline BSONObj BSONElement::embeddedObjectUserCheck() {
- uassert( 10065 , "invalid parameter: expected an object", type()==Object || type()==Array );
+ inline BSONObj BSONElement::embeddedObjectUserCheck() const {
+ uassert( 10065 , "invalid parameter: expected an object", isABSONObj() );
return BSONObj(value());
}
inline BSONObj BSONElement::embeddedObject() const {
- assert( type()==Object || type()==Array );
+ assert( isABSONObj() );
return BSONObj(value());
}
@@ -1701,14 +1880,12 @@ namespace mongo {
return false;
}
- inline BSONElement BSONObj::findElement(const char *name) const {
- if ( !isEmpty() ) {
- BSONObjIterator it(*this);
- while ( it.moreWithEOO() ) {
- BSONElement e = it.next();
- if ( strcmp(name, e.fieldName()) == 0 )
- return e;
- }
+ inline BSONElement BSONObj::getField(const char *name) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp(e.fieldName(), name) == 0 )
+ return e;
}
return BSONElement();
}
@@ -1729,7 +1906,7 @@ namespace mongo {
}
inline bool BSONObj::getObjectID(BSONElement& e) const {
- BSONElement f = findElement("_id");
+ BSONElement f = getField("_id");
if( !f.eoo() ) {
e = f;
return true;
@@ -1845,7 +2022,7 @@ namespace mongo {
~BSONObjIteratorSorted(){
assert( _fields );
- delete _fields;
+ delete[] _fields;
_fields = 0;
}
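
(The one-character fix above matters because — as the fix implies — _fields is a new[] allocation; releasing it with plain delete is undefined behavior. The rule in miniature:)

    // sketch: array new must pair with array delete
    int* many = new int[10];
    delete[] many;     // correct
    // delete many;    // wrong: mismatched deallocation, undefined behavior
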
diff --git a/db/jsobjmanipulator.h b/db/jsobjmanipulator.h
index d534d08..1771bff 100644
--- a/db/jsobjmanipulator.h
+++ b/db/jsobjmanipulator.h
@@ -22,57 +22,63 @@
namespace mongo {
-/** Manipulate the binary representation of a BSONElement in-place.
- Careful, this casts away const.
- */
-class BSONElementManipulator {
-public:
- BSONElementManipulator( const BSONElement &element ) :
- element_( element ) {
- assert( !element_.eoo() );
- }
- /** Replace a Timestamp type with a Date type initialized to
- OpTime::now().asDate()
- */
- void initTimestamp();
-
- /** Change the value, in place, of the number. */
- void setNumber(double d) {
- if ( element_.type() == NumberDouble ) *reinterpret_cast< double * >( value() ) = d;
- else if ( element_.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d;
- }
- void setLong(long long n) {
- if( element_.type() == NumberLong ) *reinterpret_cast< long long * >( value() ) = n;
- }
+ /** Manipulate the binary representation of a BSONElement in-place.
+ Careful, this casts away const.
+ */
+ class BSONElementManipulator {
+ public:
+ BSONElementManipulator( const BSONElement &element ) :
+ _element( element ) {
+ assert( !_element.eoo() );
+ }
+ /** Replace a Timestamp type with a Date type initialized to
+ OpTime::now().asDate()
+ */
+ void initTimestamp();
+
+ /** Change the value, in place, of the number. */
+ void setNumber(double d) {
+ if ( _element.type() == NumberDouble ) *reinterpret_cast< double * >( value() ) = d;
+ else if ( _element.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d;
+ }
+ void setLong(long long n) {
+ if( _element.type() == NumberLong ) *reinterpret_cast< long long * >( value() ) = n;
+ }
+ void setInt(int n) {
+ assert( _element.type() == NumberInt );
+ *reinterpret_cast< int * >( value() ) = n;
+ }
- /** Replace the type and value of the element with the type and value of e,
- preserving the original fieldName */
- void replaceTypeAndValue( const BSONElement &e ) {
- *data() = e.type();
- memcpy( value(), e.value(), e.valuesize() );
- }
-
- static void lookForTimestamps( const BSONObj& obj ){
- // If have a Timestamp field as the first or second element,
- // update it to a Date field set to OpTime::now().asDate(). The
- // replacement policy is a work in progress.
- BSONObjIterator i( obj );
- for( int j = 0; i.moreWithEOO() && j < 2; ++j ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- if ( e.type() == Timestamp ){
- BSONElementManipulator( e ).initTimestamp();
- break;
+ /** Replace the type and value of the element with the type and value of e,
+ preserving the original fieldName */
+ void replaceTypeAndValue( const BSONElement &e ) {
+ *data() = e.type();
+ memcpy( value(), e.value(), e.valuesize() );
+ }
+
+ static void lookForTimestamps( const BSONObj& obj ){
+ // If have a Timestamp field as the first or second element,
+ // update it to a Date field set to OpTime::now().asDate(). The
+ // replacement policy is a work in progress.
+
+ BSONObjIterator i( obj );
+ for( int j = 0; i.moreWithEOO() && j < 2; ++j ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ if ( e.type() == Timestamp ){
+ BSONElementManipulator( e ).initTimestamp();
+ break;
+ }
}
}
- }
-private:
- char *data() { return nonConst( element_.rawdata() ); }
- char *value() { return nonConst( element_.value() ); }
- static char *nonConst( const char *s ) { return const_cast< char * >( s ); }
- const BSONElement element_;
-};
+ private:
+ char *data() { return nonConst( _element.rawdata() ); }
+ char *value() { return nonConst( _element.value() ); }
+ static char *nonConst( const char *s ) { return const_cast< char * >( s ); }
+
+ const BSONElement _element;
+ };
} // namespace mongo
diff --git a/db/json.cpp b/db/json.cpp
index b55ddb1..7645b6b 100644
--- a/db/json.cpp
+++ b/db/json.cpp
@@ -20,6 +20,7 @@
#include "json.h"
#include "../util/builder.h"
#include "../util/base64.h"
+#include "../util/hex.h"
using namespace boost::spirit;
@@ -167,27 +168,11 @@ namespace mongo {
ObjectBuilder &b;
};
- namespace hex {
- int val( char c ) {
- if ( '0' <= c && c <= '9' )
- return c - '0';
- if ( 'a' <= c && c <= 'f' )
- return c - 'a' + 10;
- if ( 'A' <= c && c <= 'F' )
- return c - 'A' + 10;
- assert( false );
- return 0xff;
- }
- char val( const char *c ) {
- return ( val( c[ 0 ] ) << 4 ) | val( c[ 1 ] );
- }
- } // namespace hex
-
struct chU {
chU( ObjectBuilder &_b ) : b( _b ) {}
void operator() ( const char *start, const char *end ) const {
- unsigned char first = hex::val( start );
- unsigned char second = hex::val( start + 2 );
+ unsigned char first = fromHex( start );
+ unsigned char second = fromHex( start + 2 );
if ( first == 0 && second < 0x80 )
b.ss << second;
else if ( first < 0x08 ) {
@@ -315,7 +300,7 @@ namespace mongo {
OID oid;
char *oidP = (char *)( &oid );
for ( int i = 0; i < 12; ++i )
- oidP[ i ] = hex::val( s + ( i * 2 ) );
+ oidP[ i ] = fromHex( s + ( i * 2 ) );
return oid;
}
@@ -356,7 +341,7 @@ namespace mongo {
struct binDataType {
binDataType( ObjectBuilder &_b ) : b( _b ) {}
void operator() ( const char *start, const char *end ) const {
- b.binDataType = BinDataType( hex::val( start ) );
+ b.binDataType = BinDataType( fromHex( start ) );
}
ObjectBuilder &b;
};
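
(fromHex, now centralized in util/hex.h, replaces the local hex namespace deleted above. A minimal stand-in with the same shape — an assumption about the header, mirroring the removed code:)

    // sketch: decoding a hex digit and a two-character hex pair
    #include <cassert>

    inline int fromHexDigit( char c ) {
        if ( '0' <= c && c <= '9' ) return c - '0';
        if ( 'a' <= c && c <= 'f' ) return c - 'a' + 10;
        if ( 'A' <= c && c <= 'F' ) return c - 'A' + 10;
        assert( false );
        return 0xff;
    }

    inline char fromHexPair( const char* c ) {
        return (char)( ( fromHexDigit( c[0] ) << 4 ) | fromHexDigit( c[1] ) );
    }
    // fromHexPair( "4a" ) == 0x4a
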
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index e8b1fcf..9fefcfa 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -28,7 +28,7 @@ namespace mongo {
LastError LastError::noError;
LastErrorHolder lastError;
- boost::mutex LastErrorHolder::_idsmutex;
+ mongo::mutex LastErrorHolder::_idsmutex;
void LastError::appendSelf( BSONObjBuilder &b ) {
if ( !valid ) {
@@ -75,7 +75,7 @@ namespace mongo {
if ( id == 0 )
return _tl.get();
- boostlock lock(_idsmutex);
+ scoped_lock lock(_idsmutex);
map<int,Status>::iterator i = _ids.find( id );
if ( i == _ids.end() ){
if ( ! create )
@@ -95,7 +95,7 @@ namespace mongo {
}
void LastErrorHolder::remove( int id ){
- boostlock lock(_idsmutex);
+ scoped_lock lock(_idsmutex);
map<int,Status>::iterator i = _ids.find( id );
if ( i == _ids.end() )
return;
@@ -121,7 +121,7 @@ namespace mongo {
return;
}
- boostlock lock(_idsmutex);
+ scoped_lock lock(_idsmutex);
Status & status = _ids[id];
status.time = time(0);
status.lerr = le;
diff --git a/db/lasterror.h b/db/lasterror.h
index 8f687bb..78160eb 100644
--- a/db/lasterror.h
+++ b/db/lasterror.h
@@ -30,7 +30,7 @@ namespace mongo {
string msg;
enum UpdatedExistingType { NotUpdate, True, False } updatedExisting;
/* todo: nObjects should be 64 bit */
- int nObjects;
+ long long nObjects;
int nPrev;
bool valid;
bool overridenById;
@@ -40,12 +40,12 @@ namespace mongo {
code = _code;
msg = _msg;
}
- void recordUpdate( bool _updatedExisting, int nChanged ) {
+ void recordUpdate( bool _updatedExisting, long long nChanged ) {
reset( true );
nObjects = nChanged;
updatedExisting = _updatedExisting ? True : False;
}
- void recordDelete( int nDeleted ) {
+ void recordDelete( long long nDeleted ) {
reset( true );
nObjects = nDeleted;
}
@@ -100,7 +100,7 @@ namespace mongo {
time_t time;
LastError *lerr;
};
- static boost::mutex _idsmutex;
+ static mongo::mutex _idsmutex;
map<int,Status> _ids;
} lastError;
diff --git a/db/matcher.cpp b/db/matcher.cpp
index d71b7ef..8c904e3 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -22,16 +22,35 @@
#include "matcher.h"
#include "../util/goodies.h"
#include "../util/unittest.h"
-#include "storage.h"
+#include "diskloc.h"
#include "../scripting/engine.h"
#include "db.h"
#include "client.h"
+#include "pdfile.h"
+
+namespace {
+ inline pcrecpp::RE_Options flags2options(const char* flags){
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ while ( flags && *flags ) {
+ if ( *flags == 'i' )
+ options.set_caseless(true);
+ else if ( *flags == 'm' )
+ options.set_multiline(true);
+ else if ( *flags == 'x' )
+ options.set_extended(true);
+ flags++;
+ }
+ return options;
+ }
+}
+
+//#define DEBUGMATCHER(x) cout << x << endl;
+#define DEBUGMATCHER(x)
+
namespace mongo {
- //#include "minilex.h"
- //MiniLex minilex;
-
class Where {
public:
Where() {
@@ -66,52 +85,61 @@ namespace mongo {
where = 0;
}
- ElementMatcher::ElementMatcher( BSONElement _e , int _op ) : toMatch( _e ) , compareOp( _op ) {
+ ElementMatcher::ElementMatcher( BSONElement _e , int _op, bool _isNot ) : toMatch( _e ) , compareOp( _op ), isNot( _isNot ) {
if ( _op == BSONObj::opMOD ){
- BSONObj o = _e.embeddedObject().firstElement().embeddedObject();
+ BSONObj o = _e.embeddedObject();
mod = o["0"].numberInt();
modm = o["1"].numberInt();
uassert( 10073 , "mod can't be 0" , mod );
}
else if ( _op == BSONObj::opTYPE ){
- type = (BSONType)(_e.embeddedObject().firstElement().numberInt());
+ type = (BSONType)(_e.numberInt());
}
else if ( _op == BSONObj::opELEM_MATCH ){
- BSONElement m = toMatch.embeddedObjectUserCheck().firstElement();
+ BSONElement m = _e;
uassert( 12517 , "$elemMatch needs an Object" , m.type() == Object );
subMatcher.reset( new Matcher( m.embeddedObject() ) );
}
}
-
- ElementMatcher::~ElementMatcher(){
- }
-
-
-
-} // namespace mongo
-
-#include "pdfile.h"
-
-namespace {
- inline pcrecpp::RE_Options flags2options(const char* flags){
- pcrecpp::RE_Options options;
- options.set_utf8(true);
- while ( flags && *flags ) {
- if ( *flags == 'i' )
- options.set_caseless(true);
- else if ( *flags == 'm' )
- options.set_multiline(true);
- else if ( *flags == 'x' )
- options.set_extended(true);
- flags++;
+ ElementMatcher::ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot )
+ : toMatch( _e ) , compareOp( _op ), isNot( _isNot ) {
+
+ myset.reset( new set<BSONElement,element_lt>() );
+
+ BSONObjIterator i( array );
+ while ( i.more() ) {
+ BSONElement ie = i.next();
+ if ( _op == BSONObj::opALL && ie.type() == Object && ie.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ){
+ shared_ptr<Matcher> s;
+ s.reset( new Matcher( ie.embeddedObject().firstElement().embeddedObjectUserCheck() ) );
+ allMatchers.push_back( s );
+ } else if ( ie.type() == RegEx ) {
+ if ( !myregex.get() ) {
+ myregex.reset( new vector< RegexMatcher >() );
+ }
+ myregex->push_back( RegexMatcher() );
+ RegexMatcher &rm = myregex->back();
+ rm.re.reset( new pcrecpp::RE( ie.regex(), flags2options( ie.regexFlags() ) ) );
+ rm.fieldName = 0; // no need for field name
+ rm.regex = ie.regex();
+ rm.flags = ie.regexFlags();
+ rm.isNot = false;
+ bool purePrefix;
+ string prefix = simpleRegex(rm.regex, rm.flags, &purePrefix);
+ if (purePrefix)
+ rm.prefix = prefix;
+ } else {
+ myset->insert(ie);
+ }
}
- return options;
+
+ if ( allMatchers.size() ){
+ uassert( 13020 , "with $all, can't mix $elemMatch and others" , myset->size() == 0 && !myregex.get());
+ }
+
}
-}
-
-namespace mongo {
CoveredIndexMatcher::CoveredIndexMatcher(const BSONObj &jsobj, const BSONObj &indexKeyPattern) :
_keyMatcher(jsobj.filterFieldsUndotted(indexKeyPattern, true),
@@ -120,13 +148,18 @@ namespace mongo {
{
_needRecord = ! (
_docMatcher.keyMatch() &&
- _keyMatcher.jsobj.nFields() == _docMatcher.jsobj.nFields()
+ _keyMatcher.jsobj.nFields() == _docMatcher.jsobj.nFields() &&
+ ! _keyMatcher.hasType( BSONObj::opEXISTS )
);
+
}
- bool CoveredIndexMatcher::matches(const BSONObj &key, const DiskLoc &recLoc ) {
+ bool CoveredIndexMatcher::matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details ) {
+ if ( details )
+ details->reset();
+
if ( _keyMatcher.keyMatch() ) {
- if ( !_keyMatcher.matches(key) ) {
+ if ( !_keyMatcher.matches(key, details ) ){
return false;
}
}
@@ -135,14 +168,128 @@ namespace mongo {
return true;
}
- return _docMatcher.matches(recLoc.rec());
+ if ( details )
+ details->loadedObject = true;
+
+ return _docMatcher.matches(recLoc.rec() , details );
}
+ void Matcher::addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot){
+
+ if ( nRegex >= 4 ) {
+ out() << "ERROR: too many regexes in query" << endl;
+ }
+ else {
+ RegexMatcher& rm = regexs[nRegex];
+ rm.re.reset( new pcrecpp::RE(regex, flags2options(flags)) );
+ rm.fieldName = fieldName;
+ rm.regex = regex;
+ rm.flags = flags;
+ rm.isNot = isNot;
+ nRegex++;
+
+ if (!isNot){ //TODO something smarter
+ bool purePrefix;
+ string prefix = simpleRegex(regex, flags, &purePrefix);
+ if (purePrefix)
+ rm.prefix = prefix;
+ }
+ }
+ }
+
+ bool Matcher::addOp( const BSONElement &e, const BSONElement &fe, bool isNot, const char *& regex, const char *&flags ) {
+ const char *fn = fe.fieldName();
+ int op = fe.getGtLtOp( -1 );
+ if ( op == -1 ){
+ if ( !isNot && fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'f' && fn[4] == 0 ){
+ return false; // { $ref : xxx } - treat as normal object
+ }
+ uassert( 10068 , (string)"invalid operator: " + fn , op != -1 );
+ }
+
+ switch ( op ){
+ case BSONObj::GT:
+ case BSONObj::GTE:
+ case BSONObj::LT:
+ case BSONObj::LTE:{
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), op, isNot);
+ break;
+ }
+ case BSONObj::NE:{
+ haveNeg = true;
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::NE, isNot);
+ break;
+ }
+ case BSONObj::opALL:
+ all = true;
+ case BSONObj::opIN:
+ basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ break;
+ case BSONObj::NIN:
+ haveNeg = true;
+ basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ break;
+ case BSONObj::opMOD:
+ case BSONObj::opTYPE:
+ case BSONObj::opELEM_MATCH: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ // these are types where ElementMatcher has all the info
+ basics.push_back( ElementMatcher( b->done().firstElement() , op, isNot ) );
+ break;
+ }
+ case BSONObj::opSIZE:{
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opSIZE, isNot);
+ haveSize = true;
+ break;
+ }
+ case BSONObj::opEXISTS:{
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opEXISTS, isNot);
+ break;
+ }
+ case BSONObj::opREGEX:{
+ uassert( 13032, "can't use $not with $regex, use BSON regex type instead", !isNot );
+ if ( fe.type() == RegEx ){
+ regex = fe.regex();
+ flags = fe.regexFlags();
+ }
+ else {
+ regex = fe.valuestrsafe();
+ }
+ break;
+ }
+ case BSONObj::opOPTIONS:{
+ uassert( 13029, "can't use $not with $options, use BSON regex type instead", !isNot );
+ flags = fe.valuestrsafe();
+ break;
+ }
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ break;
+ default:
+ uassert( 10069 , (string)"BUG - can't handle operator: " + fn , 0 );
+ }
+ return true;
+ }
+
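
(The simpleRegex call above pulls a literal prefix out of anchored patterns so matching can fall back to a cheap strncmp — see regexMatches below. A simplified standalone sketch of the idea; the real helper is more careful about what may follow the prefix:)

    // sketch: literal-prefix extraction from an anchored regex
    #include <string>
    #include <cctype>

    std::string anchoredPrefix( const char* regex, bool* purePrefix ) {
        *purePrefix = false;
        if ( regex[0] != '^' )
            return "";                 // unanchored: no usable prefix
        std::string prefix;
        for ( const char* p = regex + 1; *p; p++ ) {
            if ( !isalnum( (unsigned char)*p ) )
                return prefix;         // stop at the first possible metacharacter
            prefix += *p;
        }
        *purePrefix = true;            // the whole pattern was a literal
        return prefix;
    }
    // anchoredPrefix( "^foo", &pure ) yields "foo" with pure == true, so /^foo/
    // can be tested with strncmp( value, "foo", 3 ) == 0
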
/* _jsobj - the query pattern
*/
Matcher::Matcher(const BSONObj &_jsobj, const BSONObj &constrainIndexKey) :
- where(0), jsobj(_jsobj), haveSize(), all(), hasArray(0), _atomic(false), nRegex(0) {
+ where(0), jsobj(_jsobj), haveSize(), all(), hasArray(0), haveNeg(), _atomic(false), nRegex(0) {
BSONObjIterator i(jsobj);
while ( i.more() ) {
@@ -171,15 +318,7 @@ namespace mongo {
}
if ( e.type() == RegEx ) {
- if ( nRegex >= 4 ) {
- out() << "ERROR: too many regexes in query" << endl;
- }
- else {
- RegexMatcher& rm = regexs[nRegex];
- rm.re = new pcrecpp::RE(e.regex(), flags2options(e.regexFlags()));
- rm.fieldName = e.fieldName();
- nRegex++;
- }
+ addRegex( e.fieldName(), e.regex(), e.regexFlags() );
continue;
}
@@ -200,75 +339,31 @@ namespace mongo {
const char *fn = fe.fieldName();
if ( fn[0] == '$' && fn[1] ) {
- int op = fe.getGtLtOp( -1 );
-
- if ( op == -1 ){
- if ( fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'f' && fn[4] == 0 ){
- break; // { $ref : xxx } - treat as normal object
- }
- uassert( 10068 , (string)"invalid operator: " + fn , op != -1 );
- }
-
isOperator = true;
- switch ( op ){
- case BSONObj::GT:
- case BSONObj::GTE:
- case BSONObj::LT:
- case BSONObj::LTE:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), op);
- isOperator = true;
- break;
- }
- case BSONObj::NE:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), BSONObj::NE);
- break;
- }
- case BSONObj::opALL:
- all = true;
- case BSONObj::opIN:
- case BSONObj::NIN:
- basics.push_back( ElementMatcher( e , op , fe.embeddedObject() ) );
- break;
- case BSONObj::opMOD:
- case BSONObj::opTYPE:
- case BSONObj::opELEM_MATCH:
- // these are types where ElementMatcher has all the info
- basics.push_back( ElementMatcher( e , op ) );
- break;
- case BSONObj::opSIZE:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), BSONObj::opSIZE);
- haveSize = true;
- break;
- }
- case BSONObj::opEXISTS:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), BSONObj::opEXISTS);
- break;
- }
- case BSONObj::opREGEX:{
- regex = fe.valuestrsafe();
- break;
- }
- case BSONObj::opOPTIONS:{
- flags = fe.valuestrsafe();
- break;
- }
- default:
- uassert( 10069 , (string)"BUG - can't operator for: " + fn , 0 );
+ if ( fn[1] == 'n' && fn[2] == 'o' && fn[3] == 't' && fn[4] == 0 ) {
+ haveNeg = true;
+ switch( fe.type() ) {
+ case Object: {
+ BSONObjIterator k( fe.embeddedObject() );
+ uassert( 13030, "$not cannot be empty", k.more() );
+ while( k.more() ) {
+ addOp( e, k.next(), true, regex, flags );
+ }
+ break;
+ }
+ case RegEx:
+ addRegex( e.fieldName(), fe.regex(), fe.regexFlags(), true );
+ break;
+ default:
+ uassert( 13031, "invalid use of $not", false );
+ }
+ } else {
+ if ( !addOp( e, fe, false, regex, flags ) ) {
+ isOperator = false;
+ break;
+ }
}
-
}
else {
isOperator = false;
@@ -276,14 +371,7 @@ namespace mongo {
}
}
if (regex){
- if ( nRegex >= 4 ) {
- out() << "ERROR: too many regexes in query" << endl;
- } else {
- RegexMatcher& rm = regexs[nRegex];
- rm.re = new pcrecpp::RE(regex, flags2options(flags));
- rm.fieldName = e.fieldName();
- nRegex++;
- }
+ addRegex(e.fieldName(), regex, flags);
}
if ( isOperator )
continue;
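
(With the $not branch wired in, a negated range query behaves like this — a hedged usage sketch assuming this tree's headers. Note that negating cmp also makes a missing field satisfy $not:)

    // sketch: $not semantics in the matcher
    #include "db/jsobj.h"
    #include "db/matcher.h"

    using namespace mongo;

    int main() {
        // { x : { $not : { $gt : 5 } } }
        Matcher m( BSON( "x" << BSON( "$not" << BSON( "$gt" << 5 ) ) ) );
        bool a = m.matches( BSON( "x" << 3 ) );   // true:  3 is not greater than 5
        bool b = m.matches( BSON( "x" << 9 ) );   // false: 9 is greater than 5
        bool c = m.matches( BSONObj() );          // true:  missing field passes $not
        return ( a && !b && c ) ? 0 : 1;
    }
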
@@ -298,21 +386,46 @@ namespace mongo {
}
// normal, simple case e.g. { a : "foo" }
- addBasic(e, BSONObj::Equality);
+ addBasic(e, BSONObj::Equality, false);
}
constrainIndexKey_ = constrainIndexKey;
}
-
+
+ inline bool regexMatches(const RegexMatcher& rm, const BSONElement& e) {
+ switch (e.type()){
+ case String:
+ case Symbol:
+ if (rm.prefix.empty())
+ return rm.re->PartialMatch(e.valuestr());
+ else
+ return !strncmp(e.valuestr(), rm.prefix.c_str(), rm.prefix.size());
+ case RegEx:
+ return !strcmp(rm.regex, e.regex()) && !strcmp(rm.flags, e.regexFlags());
+ default:
+ return false;
+ }
+ }
+
inline int Matcher::valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm) {
assert( op != BSONObj::NE && op != BSONObj::NIN );
- if ( op == BSONObj::Equality )
+ if ( op == BSONObj::Equality ) {
return l.valuesEqual(r);
+ }
if ( op == BSONObj::opIN ) {
// { $in : [1,2,3] }
- return bm.myset->count(l);
+ int count = bm.myset->count(l);
+ if ( count )
+ return count;
+ if ( bm.myregex.get() ) {
+ for( vector<RegexMatcher>::const_iterator i = bm.myregex->begin(); i != bm.myregex->end(); ++i ) {
+ if ( regexMatches( *i, l ) ) {
+ return true;
+ }
+ }
+ }
}
if ( op == BSONObj::opSIZE ) {
@@ -350,8 +463,8 @@ namespace mongo {
return (op & z);
}
- int Matcher::matchesNe(const char *fieldName, const BSONElement &toMatch, const BSONObj &obj, const ElementMatcher& bm ) {
- int ret = matchesDotted( fieldName, toMatch, obj, BSONObj::Equality, bm );
+ int Matcher::matchesNe(const char *fieldName, const BSONElement &toMatch, const BSONObj &obj, const ElementMatcher& bm , MatchDetails * details ) {
+ int ret = matchesDotted( fieldName, toMatch, obj, BSONObj::Equality, bm , false , details );
if ( bm.toMatch.type() != jstNULL )
return ( ret <= 0 ) ? 1 : 0;
else
@@ -383,16 +496,44 @@ namespace mongo {
0 missing element
1 match
*/
- int Matcher::matchesDotted(const char *fieldName, const BSONElement& toMatch, const BSONObj& obj, int compareOp, const ElementMatcher& bm , bool isArr) {
-
+ int Matcher::matchesDotted(const char *fieldName, const BSONElement& toMatch, const BSONObj& obj, int compareOp, const ElementMatcher& em , bool isArr, MatchDetails * details ) {
+ DEBUGMATCHER( "\t matchesDotted : " << fieldName << " hasDetails: " << ( details ? "yes" : "no" ) );
if ( compareOp == BSONObj::opALL ) {
- if ( bm.myset->size() == 0 )
+
+ if ( em.allMatchers.size() ){
+ BSONElement e = obj.getFieldDotted( fieldName );
+ uassert( 13021 , "$all/$elemMatch needs to be applied to array" , e.type() == Array );
+
+ for ( unsigned i=0; i<em.allMatchers.size(); i++ ){
+ bool found = false;
+ BSONObjIterator x( e.embeddedObject() );
+ while ( x.more() ){
+ BSONElement f = x.next();
+
+ if ( f.type() != Object )
+ continue;
+ if ( em.allMatchers[i]->matches( f.embeddedObject() ) ){
+ found = true;
+ break;
+ }
+ }
+
+ if ( ! found )
+ return -1;
+ }
+
+ return 1;
+ }
+
+ if ( em.myset->size() == 0 && !em.myregex.get() )
return -1; // is this desired?
+
BSONObjSetDefaultOrder actualKeys;
IndexSpec( BSON( fieldName << 1 ) ).getKeys( obj, actualKeys );
if ( actualKeys.size() == 0 )
return 0;
- for( set< BSONElement, element_lt >::const_iterator i = bm.myset->begin(); i != bm.myset->end(); ++i ) {
+
+ for( set< BSONElement, element_lt >::const_iterator i = em.myset->begin(); i != em.myset->end(); ++i ) {
// ignore nulls
if ( i->type() == jstNULL )
continue;
@@ -402,17 +543,44 @@ namespace mongo {
if ( !actualKeys.count( b.done() ) )
return -1;
}
- return 1;
- }
+ if ( !em.myregex.get() )
+ return 1;
+
+ for( vector< RegexMatcher >::const_iterator i = em.myregex->begin(); i != em.myregex->end(); ++i ) {
+ bool match = false;
+ for( BSONObjSetDefaultOrder::const_iterator j = actualKeys.begin(); j != actualKeys.end(); ++j ) {
+ if ( regexMatches( *i, j->firstElement() ) ) {
+ match = true;
+ break;
+ }
+ }
+ if ( !match )
+ return -1;
+ }
+
+ return 1;
+ } // end opALL
+
if ( compareOp == BSONObj::NE )
- return matchesNe( fieldName, toMatch, obj, bm );
+ return matchesNe( fieldName, toMatch, obj, em , details );
if ( compareOp == BSONObj::NIN ) {
- for( set<BSONElement,element_lt>::const_iterator i = bm.myset->begin(); i != bm.myset->end(); ++i ) {
- int ret = matchesNe( fieldName, *i, obj, bm );
+ for( set<BSONElement,element_lt>::const_iterator i = em.myset->begin(); i != em.myset->end(); ++i ) {
+ int ret = matchesNe( fieldName, *i, obj, em , details );
if ( ret != 1 )
return ret;
}
+ if ( em.myregex.get() ) {
+ BSONElementSet s;
+ obj.getFieldsDotted( fieldName, s );
+ for( vector<RegexMatcher>::const_iterator i = em.myregex->begin(); i != em.myregex->end(); ++i ) {
+ for( BSONElementSet::const_iterator j = s.begin(); j != s.end(); ++j ) {
+ if ( regexMatches( *i, *j ) ) {
+ return -1;
+ }
+ }
+ }
+ }
return 1;
}
@@ -420,49 +588,73 @@ namespace mongo {
bool indexed = !constrainIndexKey_.isEmpty();
if ( indexed ) {
e = obj.getFieldUsingIndexNames(fieldName, constrainIndexKey_);
- assert( !e.eoo() );
+ if( e.eoo() ){
+ cout << "obj: " << obj << endl;
+ cout << "fieldName: " << fieldName << endl;
+ cout << "constrainIndexKey_: " << constrainIndexKey_ << endl;
+ assert( !e.eoo() );
+ }
} else {
+
+ const char *p = strchr(fieldName, '.');
+ if ( p ) {
+ string left(fieldName, p-fieldName);
+
+ BSONElement se = obj.getField(left.c_str());
+ if ( se.eoo() )
+ ;
+ else if ( se.type() != Object && se.type() != Array )
+ ;
+ else {
+ BSONObj eo = se.embeddedObject();
+ return matchesDotted(p+1, toMatch, eo, compareOp, em, se.type() == Array , details );
+ }
+ }
+
if ( isArr ) {
+ DEBUGMATCHER( "\t\t isArr 1 : obj : " << obj );
BSONObjIterator ai(obj);
bool found = false;
while ( ai.moreWithEOO() ) {
BSONElement z = ai.next();
+
+ if( strcmp(z.fieldName(),fieldName) == 0 && valuesMatch(z, toMatch, compareOp, em) ) {
+ // "field.<n>" array notation was used
+ if ( details )
+ details->elemMatchKey = z.fieldName();
+ return 1;
+ }
+
if ( z.type() == Object ) {
BSONObj eo = z.embeddedObject();
- int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, bm, false);
+ int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, em, false, details );
if ( cmp > 0 ) {
+ if ( details )
+ details->elemMatchKey = z.fieldName();
return 1;
- } else if ( cmp < 0 ) {
+ }
+ else if ( cmp < 0 ) {
found = true;
}
}
}
- return found ? -1 : retMissing( bm );
+ return found ? -1 : retMissing( em );
}
- const char *p = strchr(fieldName, '.');
- if ( p ) {
- string left(fieldName, p-fieldName);
- BSONElement se = obj.getField(left.c_str());
- if ( se.eoo() )
- return retMissing( bm );
- if ( se.type() != Object && se.type() != Array )
- return retMissing( bm );
-
- BSONObj eo = se.embeddedObject();
- return matchesDotted(p+1, toMatch, eo, compareOp, bm, se.type() == Array);
- } else {
+ if( p ) {
+ return retMissing( em );
+ }
+ else {
e = obj.getField(fieldName);
}
}
if ( compareOp == BSONObj::opEXISTS ) {
- return ( e.eoo() ^ toMatch.boolean() ) ? 1 : -1;
+ return ( e.eoo() ^ ( toMatch.boolean() ^ em.isNot ) ) ? 1 : -1;
} else if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
- valuesMatch(e, toMatch, compareOp, bm ) ) {
+ valuesMatch(e, toMatch, compareOp, em ) ) {
return 1;
} else if ( e.type() == Array && compareOp != BSONObj::opSIZE ) {
-
BSONObjIterator ai(e.embeddedObject());
while ( ai.moreWithEOO() ) {
@@ -470,18 +662,23 @@ namespace mongo {
if ( compareOp == BSONObj::opELEM_MATCH ){
// SERVER-377
- if ( z.type() == Object && bm.subMatcher->matches( z.embeddedObject() ) )
+ if ( z.type() == Object && em.subMatcher->matches( z.embeddedObject() ) ){
+ if ( details )
+ details->elemMatchKey = z.fieldName();
return 1;
+ }
}
else {
- if ( valuesMatch( z, toMatch, compareOp, bm) ) {
+ if ( valuesMatch( z, toMatch, compareOp, em) ) {
+ if ( details )
+ details->elemMatchKey = z.fieldName();
return 1;
}
}
}
- if ( compareOp == BSONObj::Equality && e.woCompare( toMatch ) == 0 ){
+ if ( compareOp == BSONObj::Equality && e.woCompare( toMatch , false ) == 0 ){
// match an entire array to itself
return 1;
}
@@ -496,27 +693,9 @@ namespace mongo {
extern int dump;
- inline bool regexMatches(RegexMatcher& rm, const BSONElement& e) {
- char buf[64];
- const char *p = buf;
- if ( e.type() == String || e.type() == Symbol )
- p = e.valuestr();
- else if ( e.isNumber() ) {
- sprintf(buf, "%f", e.number());
- }
- else if ( e.type() == Date ) {
- Date_t d = e.date();
- time_t t = (d.millis/1000);
- time_t_to_String(t, buf);
- }
- else
- return false;
- return rm.re->PartialMatch(p);
- }
-
/* See if an object matches the query.
*/
- bool Matcher::matches(const BSONObj& jsobj ) {
+ bool Matcher::matches(const BSONObj& jsobj , MatchDetails * details ) {
/* assuming there is usually only one thing to match. if more this
could be slow sometimes. */
@@ -525,17 +704,21 @@ namespace mongo {
ElementMatcher& bm = basics[i];
BSONElement& m = bm.toMatch;
// -1=mismatch. 0=missing element. 1=match
- int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, bm );
+ int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, bm , false , details );
+ if ( bm.compareOp != BSONObj::opEXISTS && bm.isNot )
+ cmp = -cmp;
if ( cmp < 0 )
return false;
if ( cmp == 0 ) {
/* missing is ok iff we were looking for null */
if ( m.type() == jstNULL || m.type() == Undefined ) {
- if ( bm.compareOp == BSONObj::NE ) {
+ if ( ( bm.compareOp == BSONObj::NE ) ^ bm.isNot ) {
return false;
}
} else {
- return false;
+ if ( !bm.isNot ) {
+ return false;
+ }
}
}
}
@@ -554,7 +737,7 @@ namespace mongo {
for( BSONElementSet::const_iterator i = s.begin(); i != s.end(); ++i )
if ( regexMatches(rm, *i) )
match = true;
- if ( !match )
+ if ( !match ^ rm.isNot )
return false;
}
@@ -590,6 +773,13 @@ namespace mongo {
return true;
}
+ bool Matcher::hasType( BSONObj::MatchType type ) const {
+ for ( unsigned i=0; i<basics.size() ; i++ )
+ if ( basics[i].compareOp == type )
+ return true;
+ return false;
+ }
+
struct JSObj1 js1;
#pragma pack(1)
diff --git a/db/matcher.h b/db/matcher.h
index f1609f9..3839b68 100644
--- a/db/matcher.h
+++ b/db/matcher.h
@@ -31,13 +31,12 @@ namespace mongo {
class RegexMatcher {
public:
const char *fieldName;
- pcrecpp::RE *re;
- RegexMatcher() {
- re = 0;
- }
- ~RegexMatcher() {
- delete re;
- }
+ const char *regex;
+ const char *flags;
+ string prefix;
+ shared_ptr< pcrecpp::RE > re;
+ bool isNot;
+ RegexMatcher() : isNot() {}
};
struct element_lt
@@ -58,24 +57,17 @@ namespace mongo {
ElementMatcher() {
}
- ElementMatcher( BSONElement _e , int _op );
+ ElementMatcher( BSONElement _e , int _op, bool _isNot );
- ElementMatcher( BSONElement _e , int _op , const BSONObj& array ) : toMatch( _e ) , compareOp( _op ) {
-
- myset.reset( new set<BSONElement,element_lt>() );
-
- BSONObjIterator i( array );
- while ( i.more() ) {
- BSONElement ie = i.next();
- myset->insert(ie);
- }
- }
+ ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot );
- ~ElementMatcher();
+ ~ElementMatcher() { }
BSONElement toMatch;
int compareOp;
+ bool isNot;
shared_ptr< set<BSONElement,element_lt> > myset;
+ shared_ptr< vector<RegexMatcher> > myregex;
// these are for specific operators
int mod;
@@ -83,12 +75,34 @@ namespace mongo {
BSONType type;
shared_ptr<Matcher> subMatcher;
+
+ vector< shared_ptr<Matcher> > allMatchers;
};
-// SQL where clause equivalent
- class Where;
+ class Where; // used for $where javascript eval
class DiskLoc;
+ struct MatchDetails {
+ MatchDetails(){
+ reset();
+ }
+
+ void reset(){
+ loadedObject = false;
+ elemMatchKey = 0;
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << "loadedObject: " << loadedObject << " ";
+ ss << "elemMatchKey: " << ( elemMatchKey ? elemMatchKey : "NULL" ) << " ";
+ return ss.str();
+ }
+
+ bool loadedObject;
+ const char * elemMatchKey; // warning, this may go out of scope if matched object does
+ };
+
/* Match BSON objects against a query pattern.
e.g.
@@ -107,12 +121,12 @@ namespace mongo {
int matchesDotted(
const char *fieldName,
const BSONElement& toMatch, const BSONObj& obj,
- int compareOp, const ElementMatcher& bm, bool isArr = false);
+ int compareOp, const ElementMatcher& bm, bool isArr , MatchDetails * details );
int matchesNe(
const char *fieldName,
const BSONElement &toMatch, const BSONObj &obj,
- const ElementMatcher&bm);
+ const ElementMatcher&bm, MatchDetails * details );
public:
static int opDirection(int op) {
@@ -125,30 +139,34 @@ namespace mongo {
~Matcher();
- bool matches(const BSONObj& j);
+ bool matches(const BSONObj& j, MatchDetails * details = 0 );
- bool keyMatch() const { return !all && !haveSize && !hasArray; }
+ bool keyMatch() const { return !all && !haveSize && !hasArray && !haveNeg; }
bool atomic() const { return _atomic; }
+ bool hasType( BSONObj::MatchType type ) const;
private:
- void addBasic(const BSONElement &e, int c) {
+ void addBasic(const BSONElement &e, int c, bool isNot) {
// TODO May want to selectively ignore these element types based on op type.
if ( e.type() == MinKey || e.type() == MaxKey )
return;
- basics.push_back( ElementMatcher( e , c ) );
+ basics.push_back( ElementMatcher( e , c, isNot ) );
}
+ void addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot = false);
+ bool addOp( const BSONElement &e, const BSONElement &fe, bool isNot, const char *& regex, const char *&flags );
+
int valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm);
Where *where; // set if query uses $where
BSONObj jsobj; // the query pattern. e.g., { name: "joe" }
BSONObj constrainIndexKey_;
vector<ElementMatcher> basics;
-// int n; // # of basicmatcher items
bool haveSize;
bool all;
bool hasArray;
+ bool haveNeg;
/* $atomic - if true, a multi document operation (some removes, updates)
should be done atomically. in that case, we do not yield -
@@ -171,7 +189,7 @@ namespace mongo {
public:
CoveredIndexMatcher(const BSONObj &pattern, const BSONObj &indexKeyPattern);
bool matches(const BSONObj &o){ return _docMatcher.matches( o ); }
- bool matches(const BSONObj &key, const DiskLoc &recLoc);
+ bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 );
bool needRecord(){ return _needRecord; }
Matcher& docMatcher() { return _docMatcher; }
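
A hedged sketch of how a caller might consume the new MatchDetails out-parameter (hypothetical usage, assuming this tree's db/matcher.h; not code from this commit). Note elemMatchKey aliases the matched document, so it must be read before that object is released:

#include <iostream>
#include "db/matcher.h"   // Matcher, MatchDetails, BSONObj from this tree

void reportMatch( mongo::Matcher& m, const mongo::BSONObj& obj ) {
    mongo::MatchDetails details;
    if ( m.matches( obj, &details ) && details.elemMatchKey )
        std::cout << "matched at array position " << details.elemMatchKey << std::endl;
    details.reset();      // reusable across documents, e.g. in a cursor loop
}
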
diff --git a/db/module.cpp b/db/module.cpp
index d218fe6..78f8f79 100644
--- a/db/module.cpp
+++ b/db/module.cpp
@@ -1,4 +1,20 @@
// module.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#include "stdafx.h"
#include "module.h"
diff --git a/db/modules/mms.cpp b/db/modules/mms.cpp
index 9c00e60..248a4e4 100644
--- a/db/modules/mms.cpp
+++ b/db/modules/mms.cpp
@@ -1,4 +1,20 @@
// mms.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#include "stdafx.h"
#include "../db.h"
@@ -6,6 +22,7 @@
#include "../module.h"
#include "../../util/httpclient.h"
#include "../../util/background.h"
+#include "../commands.h"
namespace po = boost::program_options;
@@ -13,24 +30,26 @@ namespace mongo {
/** Mongo Monitoring Service
if enabled, this runs in the background and pings mms
- */
+ */
class MMS : public BackgroundJob , Module {
public:
MMS()
- : Module( "mms" ) , _baseurl( "http://mms.10gen.com/ping/" ) ,
+ : Module( "mms" ) , _baseurl( "" ) ,
_secsToSleep(1) , _token( "" ) , _name( "" ) {
add_options()
+ ( "mms-url" , po::value<string>()->default_value("http://mms.10gen.com/ping") , "url for mongo monitoring server" )
( "mms-token" , po::value<string>() , "account token for mongo monitoring server" )
- ( "mms-name" , po::value<string>() , "server name mongo monitoring server" )
- ( "mms-interval" , po::value<int>()->default_value(30) , "ping interval for mongo monitoring server" )
+ ( "mms-name" , po::value<string>() , "server name for mongo monitoring server" )
+ ( "mms-interval" , po::value<int>()->default_value(30) , "ping interval (in seconds) for mongo monitoring server" )
;
}
~MMS(){}
-
+
void config( program_options::variables_map& params ){
+ _baseurl = params["mms-url"].as<string>();
if ( params.count( "mms-token" ) ){
_token = params["mms-token"].as<string>();
}
@@ -41,87 +60,94 @@ namespace mongo {
}
void run(){
- if ( _token.size() == 0 && _name.size() == 0 ){
- log(1) << "mms not configured" << endl;
- return;
- }
-
- if ( _token.size() == 0 ){
- log() << "no token for mms - not running" << endl;
- return;
- }
-
- if ( _name.size() == 0 ){
- log() << "no name for mms - not running" << endl;
- return;
- }
-
- log() << "mms monitor staring... token:" << _token << " name:" << _name << " interval: " << _secsToSleep << endl;
-
- unsigned long long lastTime = 0;
- unsigned long long lastLockTime = 0;
-
- while ( ! inShutdown() ){
- sleepsecs( _secsToSleep );
-
- stringstream url;
- url << _baseurl << _token << "?";
- url << "monitor_name=" << _name << "&";
- url << "version=" << versionString << "&";
- url << "git_hash=" << gitVersion() << "&";
+ if ( _token.size() == 0 && _name.size() == 0 ){
+ log(1) << "mms not configured" << endl;
+ return;
+ }
- { //percent_locked
- unsigned long long time = curTimeMicros64();
- unsigned long long start , lock;
- dbMutex.info().getTimingInfo( start , lock );
- if ( lastTime ){
- double timeDiff = (double) (time - lastTime);
- double lockDiff = (double) (lock - lastLockTime);
- url << "percent_locked=" << (int)ceil( 100 * ( lockDiff / timeDiff ) ) << "&";
- }
- lastTime = time;
- lastLockTime = lock;
+ if ( _token.size() == 0 ){
+ log() << "no token for mms - not running" << endl;
+ return;
}
-
- vector< string > dbNames;
- getDatabaseNames( dbNames );
- boost::intmax_t totalSize = 0;
- for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
- boost::intmax_t size = dbSize( i->c_str() );
- totalSize += size;
+
+ if ( _name.size() == 0 ){
+ log() << "no name for mms - not running" << endl;
+ return;
}
- url << "data_size=" << totalSize / ( 1024 * 1024 ) << "&";
-
+ log() << "mms monitor staring... token:" << _token << " name:" << _name << " interval: " << _secsToSleep << endl;
+ Client::initThread( "mms" );
+ Client& c = cc();
- /* TODO:
- message_operations
- update_operations
- insert_operations
- get_more_operations
- delete_operations
- kill_cursors_operations
- */
-
- log(1) << "mms url: " << url.str() << endl;
+ // TODO: using direct client is bad, but easy for now
- try {
- HttpClient c;
- map<string,string> headers;
- stringstream ss;
- int rc = c.get( url.str() , headers , ss );
- log(1) << "\t response code: " << rc << endl;
- if ( rc != 200 ){
- log() << "mms error response code:" << rc << endl;
- log(1) << "mms error body:" << ss.str() << endl;
+ while ( ! inShutdown() ){
+ sleepsecs( _secsToSleep );
+
+ try {
+ stringstream url;
+ url << _baseurl << "?"
+ << "token=" << _token << "&"
+ << "name=" << _name << "&"
+ << "ts=" << time(0)
+ ;
+
+ BSONObjBuilder bb;
+ // duplicated so the post has everything
+ bb.append( "token" , _token );
+ bb.append( "name" , _name );
+ bb.appendDate( "ts" , jsTime() );
+
+ // any commands
+ _add( bb , "buildinfo" );
+ _add( bb , "serverStatus" );
+
+ BSONObj postData = bb.obj();
+
+ log(1) << "mms url: " << url.str() << "\n\t post: " << postData << endl;;
+
+ HttpClient c;
+ HttpClient::Result r;
+ int rc = c.post( url.str() , postData.jsonString() , &r );
+ log(1) << "\t response code: " << rc << endl;
+ if ( rc != 200 ){
+ log() << "mms error response code:" << rc << endl;
+ log(1) << "mms error body:" << r.getEntireResponse() << endl;
+ }
+ }
+ catch ( std::exception& e ){
+ log() << "mms exception: " << e.what() << endl;
}
}
- catch ( std::exception& e ){
- log() << "mms get exception: " << e.what() << endl;
- }
+
+ c.shutdown();
}
+
+ void _add( BSONObjBuilder& postData , const char* cmd ){
+ Command * c = Command::findCommand( cmd );
+ if ( ! c ){
+ log() << "MMS can't find command: " << cmd << endl;
+ postData.append( cmd , "can't find command" );
+ return;
+ }
+
+ if ( c->locktype() ){
+ log() << "MMS can only use noLocking commands not: " << cmd << endl;
+ postData.append( cmd , "not noLocking" );
+ return;
+ }
+
+ BSONObj co = BSON( cmd << 1 );
+
+ string errmsg;
+ BSONObjBuilder sub;
+ if ( ! c->run( "admin.$cmd" , co , errmsg , sub , false ) )
+ postData.append( cmd , errmsg );
+ else
+ postData.append( cmd , sub.obj() );
}
+
void init(){ go(); }
@@ -135,8 +161,8 @@ namespace mongo {
string _token;
string _name;
-
- } /* mms */;
+
+ } /*mms*/ ;
}
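
For reference, each ping the reworked loop sends is an HTTP POST to <mms-url>?token=...&name=...&ts=... every mms-interval seconds, with a body of roughly this shape (reconstructed from the builder calls above; contents illustrative):

{
  token: "<mms-token>",                      // duplicated so the post has everything
  name: "<mms-name>",
  ts: <Date>,
  buildinfo: { /* buildinfo command output */ },
  serverStatus: { /* serverStatus command output */ }
}
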
diff --git a/db/mr.cpp b/db/mr.cpp
index ff88d9e..210dfca 100644
--- a/db/mr.cpp
+++ b/db/mr.cpp
@@ -28,6 +28,8 @@ namespace mongo {
namespace mr {
+ typedef vector<BSONObj> BSONList;
+
class MyCmp {
public:
MyCmp(){}
@@ -38,48 +40,76 @@ namespace mongo {
typedef pair<BSONObj,BSONObj> Data;
//typedef list< Data > InMemory;
- typedef map< BSONObj,list<BSONObj>,MyCmp > InMemory;
+ typedef map< BSONObj,BSONList,MyCmp > InMemory;
- BSONObj reduceValues( list<BSONObj>& values , Scope * s , ScriptingFunction reduce , bool final , ScriptingFunction finalize ){
+ BSONObj reduceValues( BSONList& values , Scope * s , ScriptingFunction reduce , bool final , ScriptingFunction finalize ){
uassert( 10074 , "need values" , values.size() );
int sizeEstimate = ( values.size() * values.begin()->getField( "value" ).size() ) + 128;
BSONObj key;
BSONObjBuilder reduceArgs( sizeEstimate );
-
- BSONObjBuilder valueBuilder( sizeEstimate );
- int n = 0;
- for ( list<BSONObj>::iterator i=values.begin(); i!=values.end(); i++){
- BSONObj o = *i;
- BSONObjIterator j(o);
+ BSONArrayBuilder * valueBuilder = 0;
+
+ int sizeSoFar = 0;
+ unsigned n = 0;
+ for ( ; n<values.size(); n++ ){
+ BSONObjIterator j(values[n]);
BSONElement keyE = j.next();
if ( n == 0 ){
reduceArgs.append( keyE );
- BSONObjBuilder temp;
- temp.append( keyE );
- key = temp.obj();
+ key = keyE.wrap();
+ valueBuilder = new BSONArrayBuilder( reduceArgs.subarrayStart( "values" ) );
+ sizeSoFar = 5 + keyE.size();
}
- valueBuilder.appendAs( j.next() , BSONObjBuilder::numStr( n++ ).c_str() );
+
+ BSONElement ee = j.next();
+
+ uassert( 13070 , "value too large to reduce" , ee.size() < ( 2 * 1024 * 1024 ) );
+
+ if ( sizeSoFar + ee.size() > ( 4 * 1024 * 1024 ) ){
+ assert( n > 1 ); // if not, inf. loop
+ break;
+ }
+
+ valueBuilder->append( ee );
+ sizeSoFar += ee.size();
}
-
- reduceArgs.appendArray( "values" , valueBuilder.obj() );
+ assert(valueBuilder);
+ valueBuilder->done();
+ delete valueBuilder;
BSONObj args = reduceArgs.obj();
-
+
s->invokeSafe( reduce , args );
if ( s->type( "return" ) == Array ){
uassert( 10075 , "reduce -> multiple not supported yet",0);
return BSONObj();
}
+
+ int endSizeEstimate = key.objsize() + ( args.objsize() / values.size() );
+
+ if ( n < values.size() ){
+ BSONList x;
+ for ( ; n < values.size(); n++ ){
+ x.push_back( values[n] );
+ }
+ BSONObjBuilder temp( endSizeEstimate );
+ temp.append( key.firstElement() );
+ s->append( temp , "1" , "return" );
+ x.push_back( temp.obj() );
+ return reduceValues( x , s , reduce , final , finalize );
+ }
+
+
if ( finalize ){
- BSONObjBuilder b;
+ BSONObjBuilder b(endSizeEstimate);
b.appendAs( key.firstElement() , "_id" );
s->append( b , "value" , "return" );
s->invokeSafe( finalize , b.obj() );
}
- BSONObjBuilder b;
+ BSONObjBuilder b(endSizeEstimate);
b.appendAs( key.firstElement() , final ? "_id" : "0" );
s->append( b , final ? "value" : "1" , "return" );
return b.obj();
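
The rewritten reduceValues caps each reduce invocation at roughly 4MB of values and feeds each partial result back into the remaining input, which is why user reduce functions must be able to re-reduce their own output. A self-contained sketch of the same strategy over plain ints (illustrative, not the BSON code above):

#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

static int reduceInts( const std::vector<int>& vals ) {
    return std::accumulate( vals.begin(), vals.end(), 0 );
}

// chunkedReduce: reduce maxChunk values at a time (maxChunk >= 2), pushing each
// partial result back onto the input, exactly like the 4MB slices above.
static int chunkedReduce( std::vector<int> vals, size_t maxChunk ) {
    while ( vals.size() > maxChunk ) {
        std::vector<int> chunk( vals.begin(), vals.begin() + maxChunk );
        vals.erase( vals.begin(), vals.begin() + maxChunk );
        vals.push_back( reduceInts( chunk ) );   // partial result re-enters the input
    }
    return reduceInts( vals );
}

int main() {
    std::vector<int> v( 100, 1 );
    assert( chunkedReduce( v, 8 ) == 100 );
    return 0;
}
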
@@ -108,8 +138,12 @@ namespace mongo {
if ( ! keeptemp && markAsTemp )
cc().addTempCollection( tempLong );
- if ( cmdObj["out"].type() == String )
+ replicate = keeptemp;
+
+ if ( cmdObj["out"].type() == String ){
finalShort = cmdObj["out"].valuestr();
+ replicate = true;
+ }
else
finalShort = tempShort;
@@ -123,8 +157,10 @@ namespace mongo {
if ( cmdObj["finalize"].type() ){
finalizeCode = cmdObj["finalize"].ascode();
}
+ checkCodeWScope( "map" , cmdObj );
+ checkCodeWScope( "reduce" , cmdObj );
+ checkCodeWScope( "finalize" , cmdObj );
-
if ( cmdObj["mapparams"].type() == Array ){
mapparams = cmdObj["mapparams"].embeddedObjectUserCheck();
}
@@ -151,6 +187,14 @@ namespace mongo {
}
}
+ void checkCodeWScope( const char * field , const BSONObj& o ){
+ BSONElement e = o[field];
+ if ( e.type() != CodeWScope )
+ return;
+ BSONObj x = e.codeWScopeObject();
+ uassert( 13035 , (string)"can't use CodeWScope with map/reduce function: " + field , x.isEmpty() );
+ }
+
/**
@return number of objects in collection
*/
@@ -171,6 +215,7 @@ namespace mongo {
// options
bool verbose;
bool keeptemp;
+ bool replicate;
// query options
@@ -224,12 +269,13 @@ namespace mongo {
db.dropCollection( setup.incLong );
writelock l( setup.incLong );
+ Client::Context ctx( setup.incLong );
string err;
assert( userCreateNS( setup.incLong.c_str() , BSON( "autoIndexId" << 0 ) , err , false ) );
}
- void finalReduce( list<BSONObj>& values ){
+ void finalReduce( BSONList& values ){
if ( values.size() == 0 )
return;
@@ -237,7 +283,11 @@ namespace mongo {
BSONObj res = reduceValues( values , scope.get() , reduce , 1 , finalize );
writelock l( setup.tempLong );
- theDataFileMgr.insertAndLog( setup.tempLong.c_str() , res , false );
+ Client::Context ctx( setup.incLong );
+ if ( setup.replicate )
+ theDataFileMgr.insertAndLog( setup.tempLong.c_str() , res , false );
+ else
+ theDataFileMgr.insert( setup.tempLong.c_str() , res , false );
}
@@ -272,7 +322,7 @@ namespace mongo {
for ( InMemory::iterator i=old->begin(); i!=old->end(); i++ ){
BSONObj key = i->first;
- list<BSONObj>& all = i->second;
+ BSONList& all = i->second;
if ( all.size() == 1 ){
// this key has low cardinality, so just write to db
@@ -291,13 +341,14 @@ namespace mongo {
void dump(){
writelock l(_state.setup.incLong);
+ Client::Context ctx(_state.setup.incLong);
for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ){
- list<BSONObj>& all = i->second;
+ BSONList& all = i->second;
if ( all.size() < 1 )
continue;
- for ( list<BSONObj>::iterator j=all.begin(); j!=all.end(); j++ )
+ for ( BSONList::iterator j=all.begin(); j!=all.end(); j++ )
write( *j );
}
_temp->clear();
@@ -306,7 +357,7 @@ namespace mongo {
}
void insert( const BSONObj& a ){
- list<BSONObj>& all = (*_temp)[a];
+ BSONList& all = (*_temp)[a];
all.push_back( a );
_size += a.objsize() + 16;
}
@@ -343,7 +394,8 @@ namespace mongo {
boost::thread_specific_ptr<MRTL> _tlmr;
BSONObj fast_emit( const BSONObj& args ){
- uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
+ uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
+ uassert( 13069 , "an emit can't be more than 2mb" , args.objsize() < ( 2 * 1024 * 1024 ) );
_tlmr->insert( args );
_tlmr->numEmits++;
return BSONObj();
@@ -357,11 +409,14 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "see http://www.mongodb.org/display/DOCS/MapReduce";
}
-
+ virtual LockType locktype(){ return WRITE; } // TODO, READ?
bool run(const char *dbname, BSONObj& cmd, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
Timer t;
Client::GodScope cg;
- MRSetup mr( cc().database()->name , cmd );
+ Client& client = cc();
+ CurOp * op = client.curop();
+
+ MRSetup mr( client.database()->name , cmd );
log(1) << "mr ns: " << mr.ns << endl;
@@ -385,7 +440,7 @@ namespace mongo {
MRTL * mrtl = new MRTL( state );
_tlmr.reset( mrtl );
- ProgressMeter pm( db.count( mr.ns , mr.filter ) );
+ ProgressMeter & pm = op->setMessage( "m/r: (1/3) emit phase" , db.count( mr.ns , mr.filter ) );
auto_ptr<DBClientCursor> cursor = db.query( mr.ns , mr.q );
long long mapTime = 0;
Timer mt;
@@ -405,6 +460,7 @@ namespace mongo {
Timer t;
mrtl->checkSize();
inReduce += t.micros();
+ killCurrentOp.checkForInterrupt();
dbtemprelease temprlease;
}
pm.hit();
@@ -412,9 +468,10 @@ namespace mongo {
if ( mr.limit && num >= mr.limit )
break;
}
+ pm.finished();
- countsBuilder.append( "input" , num );
- countsBuilder.append( "emit" , mrtl->numEmits );
+ countsBuilder.appendNumber( "input" , num );
+ countsBuilder.appendNumber( "emit" , mrtl->numEmits );
if ( mrtl->numEmits )
shouldHaveData = true;
@@ -422,7 +479,7 @@ namespace mongo {
timingBuilder.append( "emitLoop" , t.millis() );
// final reduce
-
+ op->setMessage( "m/r: (2/3) final reduce in memory" );
mrtl->reduceInMemory();
mrtl->dump();
@@ -430,16 +487,22 @@ namespace mongo {
db.ensureIndex( mr.incLong , sortKey );
BSONObj prev;
- list<BSONObj> all;
+ BSONList all;
- ProgressMeter fpm( db.count( mr.incLong ) );
+ assert( userCreateNS( mr.tempLong.c_str() , BSONObj() , errmsg , mr.replicate ) );
+
+ pm = op->setMessage( "m/r: (3/3) final reduce to collection" , db.count( mr.incLong ) );
cursor = db.query( mr.incLong, Query().sort( sortKey ) );
while ( cursor->more() ){
BSONObj o = cursor->next().getOwned();
-
+ pm.hit();
+
if ( o.woSortOrder( prev , sortKey ) == 0 ){
all.push_back( o );
+ if ( pm.hits() % 1000 == 0 ){
+ dbtemprelease tl;
+ }
continue;
}
@@ -448,12 +511,11 @@ namespace mongo {
all.clear();
prev = o;
all.push_back( o );
- fpm.hit();
+ killCurrentOp.checkForInterrupt();
dbtemprelease tl;
}
-
state.finalReduce( all );
-
+ pm.finished();
_tlmr.reset( 0 );
}
catch ( ... ){
@@ -471,7 +533,7 @@ namespace mongo {
result.append( "result" , mr.finalShort );
result.append( "timeMillis" , t.millis() );
- countsBuilder.append( "output" , finalCount );
+ countsBuilder.appendNumber( "output" , finalCount );
if ( mr.verbose ) result.append( "timing" , timingBuilder.obj() );
result.append( "counts" , countsBuilder.obj() );
@@ -493,11 +555,12 @@ namespace mongo {
public:
MapReduceFinishCommand() : Command( "mapreduce.shardedfinish" ){}
virtual bool slaveOk() { return true; }
-
+
+ virtual LockType locktype(){ return WRITE; }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- dbtemprelease temprlease; // we don't touch the db directly
-
- string dbname = cc().database()->name;
+ string dbname = cc().database()->name; // this has to come before dbtemprelease
+ dbtemprelease temprelease; // we don't touch the db directly
+
string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
MRSetup mr( dbname , cmdObj.firstElement().embeddedObjectUserCheck() , false );
@@ -540,14 +603,14 @@ namespace mongo {
if ( mr.finalizeCode.size() )
finalizeFunction = s->createFunction( mr.finalizeCode.c_str() );
- list<BSONObj> values;
+ BSONList values;
result.append( "result" , mr.finalShort );
DBDirectClient db;
while ( cursor.more() ){
- BSONObj t = cursor.next();
+ BSONObj t = cursor.next().getOwned();
if ( values.size() == 0 ){
values.push_back( t );
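
One subtle fix just above: cursor.next() returns a BSONObj that aliases the client cursor's current batch buffer, which is invalidated when more() fetches the next batch. Since t is pushed onto values and kept across iterations, it must be deep-copied:

BSONObj t = cursor.next().getOwned();   // owns its buffer; safe to retain in `values`
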
diff --git a/db/namespace.cpp b/db/namespace.cpp
index ecd5f64..210efb6 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -47,11 +47,43 @@ namespace mongo {
}
boost::filesystem::path NamespaceIndex::path() const {
- return boost::filesystem::path( dir_ ) / ( database_ + ".ns" );
+ boost::filesystem::path ret( dir_ );
+ if ( directoryperdb )
+ ret /= database_;
+ ret /= ( database_ + ".ns" );
+ return ret;
}
+ void NamespaceIndex::maybeMkdir() const {
+ if ( !directoryperdb )
+ return;
+ boost::filesystem::path dir( dir_ );
+ dir /= database_;
+ if ( !boost::filesystem::exists( dir ) )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( dir ) );
+ }
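
With the new --directoryperdb flag, each database's files live in their own subdirectory; the two layouts look like this (paths illustrative):

default:            /data/db/foo.ns, /data/db/foo.0, ...
--directoryperdb:   /data/db/foo/foo.ns, /data/db/foo/foo.0, ...

maybeMkdir() creates the per-db directory lazily, just before a new .ns file is mapped.
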
+
int lenForNewNsFiles = 16 * 1024 * 1024;
+ void NamespaceDetails::onLoad(const Namespace& k) {
+ if( k.isExtra() ) {
+ /* overflow storage for indexes - so don't treat as a NamespaceDetails object. */
+ return;
+ }
+
+ assertInWriteLock();
+ if( backgroundIndexBuildInProgress ) {
+ log() << "backgroundIndexBuildInProgress was " << backgroundIndexBuildInProgress << " for " << k << ", indicating an abnormal db shutdown" << endl;
+ backgroundIndexBuildInProgress = 0;
+ }
+ }
+
+ static void callback(const Namespace& k, NamespaceDetails& v) {
+ v.onLoad(k);
+ }
+
+ bool checkNsFilesOnLoad = true;
+
void NamespaceIndex::init() {
if ( ht )
return;
@@ -82,6 +114,7 @@ namespace mongo {
else {
// use lenForNewNsFiles, we are making a new database
massert( 10343 , "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
+ maybeMkdir();
long l = lenForNewNsFiles;
p = f.map(pathString.c_str(), l);
if( p ) {
@@ -95,6 +128,8 @@ namespace mongo {
dbexit( EXIT_FS );
}
ht = new HashTable<Namespace,NamespaceDetails>(p, len, "namespace index");
+ if( checkNsFilesOnLoad )
+ ht->iterAll(callback);
}
void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
@@ -446,9 +481,14 @@ namespace mongo {
// signal done allocating new extents.
if ( !deletedList[ 1 ].isValid() )
deletedList[ 1 ] = DiskLoc();
-
+
assert( len < 400000000 );
int passes = 0;
+ int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
+ if ( maxPasses < 5000 ){
+ // this is for backwards safety since 5000 was the old value
+ maxPasses = 5000;
+ }
DiskLoc loc;
// delete records until we have room and the max # objects limit achieved.
@@ -497,10 +537,10 @@ namespace mongo {
DiskLoc fr = theCapExtent()->firstRecord;
theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
compact();
- if( ++passes >= 5000 ) {
- log() << "passes ns:" << ns << " len:" << len << '\n';
+ if( ++passes > maxPasses ) {
+ log() << "passes ns:" << ns << " len:" << len << " maxPasses: " << maxPasses << '\n';
log() << "passes max:" << max << " nrecords:" << nrecords << " datasize: " << datasize << endl;
- massert( 10345 , "passes >= 5000 in capped collection alloc", false );
+ massert( 10345 , "passes >= maxPasses in capped collection alloc", false );
}
}
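
A worked example of the new bound: maxPasses = len/30 + 2, where 30 bytes approximates the smallest oplog entry. For a 4KB record that is 4096/30 + 2 = 138, below the 5000 floor, so nothing changes; for a 16MB record it is 16*1024*1024/30 + 2 ≈ 559,242 passes, enough to free a long run of minimal entries that the old hard-coded 5000 would have tripped over.
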
@@ -512,7 +552,7 @@ namespace mongo {
}
/* you MUST call when adding an index. see pdfile.cpp */
- IndexDetails& NamespaceDetails::addIndex(const char *thisns) {
+ IndexDetails& NamespaceDetails::addIndex(const char *thisns, bool resetTransient) {
assert( nsdetails(thisns) == this );
if( nIndexes == NIndexesBase && extraOffset == 0 ) {
@@ -521,7 +561,8 @@ namespace mongo {
IndexDetails& id = idx(nIndexes);
nIndexes++;
- NamespaceDetailsTransient::get_w(thisns).addedIndex();
+ if ( resetTransient )
+ NamespaceDetailsTransient::get_w(thisns).addedIndex();
return id;
}
@@ -543,31 +584,39 @@ namespace mongo {
for ( int i = 0; i < nIndexes; i++ ) {
IndexDetails& idx = indexes[i];
BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
- if ( !idxKey.findElement(fieldName).eoo() )
+ if ( !idxKey.getField(fieldName).eoo() )
return i;
}*/
return -1;
}
- long long NamespaceDetails::storageSize(){
+ long long NamespaceDetails::storageSize( int * numExtents ){
Extent * e = firstExtent.ext();
assert( e );
long long total = 0;
+ int n = 0;
while ( e ){
- total += e->length;
- e = e->getNextExtent();
+ total += e->length;
+ e = e->getNextExtent();
+ n++;
}
+
+ if ( numExtents )
+ *numExtents = n;
+
return total;
}
/* ------------------------------------------------------------------------- */
- boost::mutex NamespaceDetailsTransient::_qcMutex;
+ mongo::mutex NamespaceDetailsTransient::_qcMutex;
+ mongo::mutex NamespaceDetailsTransient::_isMutex;
map< string, shared_ptr< NamespaceDetailsTransient > > NamespaceDetailsTransient::_map;
typedef map< string, shared_ptr< NamespaceDetailsTransient > >::iterator ouriter;
void NamespaceDetailsTransient::reset() {
+ DEV assertInWriteLock();
clearQueryCache();
_keysComputed = false;
_indexSpecs.clear();
@@ -595,11 +644,13 @@ namespace mongo {
_keysComputed = true;
_indexKeys.clear();
NamespaceDetails *d = nsdetails(_ns.c_str());
+ if ( ! d )
+ return;
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() )
i.next().keyPattern().getFieldNames(_indexKeys);
}
-
+
void NamespaceDetailsTransient::cllStart( int logSizeMb ) {
assertInWriteLock();
_cll_ns = "local.temp.oplog." + _ns;
@@ -607,7 +658,7 @@ namespace mongo {
stringstream spec;
// 128MB
spec << "{size:" << logSizeMb * 1024 * 1024 << ",capped:true,autoIndexId:false}";
- setClient( _cll_ns.c_str() );
+ Client::Context ct( _cll_ns );
string err;
massert( 10347 , "Could not create log ns", userCreateNS( _cll_ns.c_str(), fromjson( spec.str() ), err, false ) );
NamespaceDetails *d = nsdetails( _cll_ns.c_str() );
@@ -633,7 +684,7 @@ namespace mongo {
assertInWriteLock();
if ( !_cll_enabled )
return;
- setClient( _cll_ns.c_str() );
+ Client::Context ctx( _cll_ns );
dropNS( _cll_ns );
}
diff --git a/db/namespace.h b/db/namespace.h
index df4c62f..1b1a954 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -21,7 +21,7 @@
#include "../stdafx.h"
#include "jsobj.h"
#include "queryutil.h"
-#include "storage.h"
+#include "diskloc.h"
#include "../util/hashtab.h"
#include "../util/mmap.h"
@@ -75,6 +75,10 @@ namespace mongo {
NamespaceString( const char * ns ) { init(ns); }
NamespaceString( const string& ns ) { init(ns.c_str()); }
+ string ns() const {
+ return db + '.' + coll;
+ }
+
bool isSystem() {
return strncmp(coll.c_str(), "system.", 7) == 0;
}
@@ -100,6 +104,10 @@ namespace mongo {
massert( 10348 , "ns name too long", s.size() < MaxNsLen);
return s;
}
+ bool isExtra() const {
+ const char *p = strstr(buf, "$extra");
+ return p && p[6] == 0; //==0 important in case an index uses name "$extra_1" for example
+ }
void kill() {
buf[0] = 0x7f;
@@ -186,6 +194,9 @@ namespace mongo {
BOOST_STATIC_ASSERT( NIndexesMax == NIndexesBase + NIndexesExtra );
+ /* called when loaded from disk */
+ void onLoad(const Namespace& k);
+
NamespaceDetails( const DiskLoc &loc, bool _capped ) {
/* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
firstExtent = lastExtent = capExtent = loc;
@@ -251,6 +262,13 @@ namespace mongo {
int backgroundIndexBuildInProgress; // 1 if in prog
char reserved[76];
+ /* when a background index build is in progress, we don't count the index in nIndexes until
+ complete, yet need to still use it in _indexRecord() - thus we use this function for that.
+ */
+ int nIndexesBeingBuilt() const {
+ return nIndexes + backgroundIndexBuildInProgress;
+ }
+
/* NOTE: be careful with flags. are we manipulating them in read locks? if so,
this isn't thread safe. TODO
*/
@@ -264,6 +282,10 @@ namespace mongo {
return _indexes[idxNo];
return extra()->details[idxNo-NIndexesBase];
}
+ IndexDetails& backgroundIdx() {
+ DEV assert(backgroundIndexBuildInProgress);
+ return idx(nIndexes);
+ }
class IndexIterator {
friend class NamespaceDetails;
@@ -324,7 +346,7 @@ namespace mongo {
/* add a new index. does not add to system.indexes etc. - just to NamespaceDetails.
caller must populate returned object.
*/
- IndexDetails& addIndex(const char *thisns);
+ IndexDetails& addIndex(const char *thisns, bool resetTransient=true);
void aboutToDeleteAnIndex() {
flags &= ~Flag_HaveIdIndex;
@@ -410,7 +432,7 @@ namespace mongo {
void checkMigrate();
- long long storageSize();
+ long long storageSize( int * numExtents = 0 );
private:
bool cappedMayDelete() const {
@@ -450,7 +472,7 @@ namespace mongo {
static std::map< string, shared_ptr< NamespaceDetailsTransient > > _map;
public:
NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(false), _qcWriteCount(), _cll_enabled() { }
- /* _get() is not threadsafe */
+ /* _get() is not threadsafe -- see get_inlock() comments */
static NamespaceDetailsTransient& _get(const char *ns);
/* use get_w() when doing write operations */
static NamespaceDetailsTransient& get_w(const char *ns) {
@@ -484,12 +506,16 @@ namespace mongo {
/* IndexSpec caching */
private:
map<const IndexDetails*,IndexSpec> _indexSpecs;
+ static mongo::mutex _isMutex;
public:
const IndexSpec& getIndexSpec( const IndexDetails * details ){
- DEV assertInWriteLock();
IndexSpec& spec = _indexSpecs[details];
- if ( spec.meta.isEmpty() ){
- spec.reset( details->info );
+ if ( ! spec._finishedInit ){
+ scoped_lock lk(_isMutex);
+ if ( ! spec._finishedInit ){
+ spec.reset( details );
+ assert( spec._finishedInit );
+ }
}
return spec;
}
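
getIndexSpec now takes the classic double-checked locking shape, with IndexSpec::_finishedInit as the published flag:

if ( ! spec._finishedInit ) {       // cheap unlocked check
    scoped_lock lk( _isMutex );     // serialize initializers
    if ( ! spec._finishedInit )     // re-check under the lock
        spec.reset( details );      // sets _finishedInit last
}

As with any double-checked lock without memory barriers, this is only safe if a reader can never observe a partially-initialized spec; here it presumably relies on the surrounding db lock most callers already hold.
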
@@ -499,7 +525,7 @@ namespace mongo {
int _qcWriteCount;
map< QueryPattern, pair< BSONObj, long long > > _qcCache;
public:
- static boost::mutex _qcMutex;
+ static mongo::mutex _qcMutex;
/* you must be in the qcMutex when calling this (and using the returned val): */
static NamespaceDetailsTransient& get_inlock(const char *ns) {
return _get(ns);
@@ -555,9 +581,9 @@ namespace mongo {
BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
public:
NamespaceIndex(const string &dir, const string &database) :
- ht( 0 ),
- dir_( dir ),
- database_( database ) {}
+ ht( 0 ),
+ dir_( dir ),
+ database_( database ) {}
/* returns true if new db will be created if we init lazily */
bool exists() const;
@@ -637,6 +663,7 @@ namespace mongo {
private:
boost::filesystem::path path() const;
+ void maybeMkdir() const;
MemoryMappedFile f;
HashTable<Namespace,NamespaceDetails> *ht;
@@ -644,7 +671,8 @@ namespace mongo {
string database_;
};
- extern string dbpath; // --dbpath parm
+ extern string dbpath; // --dbpath parm
+ extern bool directoryperdb;
// Rename a namespace within current 'client' db.
// (Arguments should include db name)
diff --git a/db/nonce.cpp b/db/nonce.cpp
index 4c677be..d8db58d 100644
--- a/db/nonce.cpp
+++ b/db/nonce.cpp
@@ -49,8 +49,8 @@ namespace mongo {
}
nonce Security::getNonce(){
- static boost::mutex m;
- boostlock lk(m);
+ static mongo::mutex m;
+ scoped_lock lk(m);
/* question/todo: /dev/random works on OS X. is it better
to use that than random() / srandom()?
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 18df5f1..1c4608c 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -30,6 +30,7 @@ _ disallow system* manipulations from the database.
#include "../util/mmap.h"
#include "../util/hashtab.h"
#include "../util/file_allocator.h"
+#include "../util/processinfo.h"
#include "btree.h"
#include <algorithm>
#include <list>
@@ -40,10 +41,63 @@ _ disallow system* manipulations from the database.
#include "queryutil.h"
#include "extsort.h"
#include "curop.h"
+#include "background.h"
namespace mongo {
+ map<string, unsigned> BackgroundOperation::dbsInProg;
+ set<string> BackgroundOperation::nsInProg;
+
+ bool BackgroundOperation::inProgForDb(const char *db) {
+ assertInWriteLock();
+ return dbsInProg[db] != 0;
+ }
+
+ bool BackgroundOperation::inProgForNs(const char *ns) {
+ assertInWriteLock();
+ return nsInProg.count(ns) != 0;
+ }
+
+ void BackgroundOperation::assertNoBgOpInProgForDb(const char *db) {
+ uassert(12586, "cannot perform operation: a background operation is currently running for this database",
+ !inProgForDb(db));
+ }
+
+ void BackgroundOperation::assertNoBgOpInProgForNs(const char *ns) {
+ uassert(12587, "cannot perform operation: a background operation is currently running for this collection",
+ !inProgForNs(ns));
+ }
+
+ BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
+ assertInWriteLock();
+ dbsInProg[_ns.db]++;
+ assert( nsInProg.count(_ns.ns()) == 0 );
+ nsInProg.insert(_ns.ns());
+ }
+
+ BackgroundOperation::~BackgroundOperation() {
+ assertInWriteLock();
+ dbsInProg[_ns.db]--;
+ nsInProg.erase(_ns.ns());
+ }
+
+ void BackgroundOperation::dump(stringstream& ss) {
+ if( nsInProg.size() ) {
+ ss << "\n<b>Background Jobs in Progress</b>\n";
+ for( set<string>::iterator i = nsInProg.begin(); i != nsInProg.end(); i++ )
+ ss << " " << *i << '\n';
+ }
+ for( map<string,unsigned>::iterator i = dbsInProg.begin(); i != dbsInProg.end(); i++ ) {
+ if( i->second )
+ ss << "database " << i->first << ": " << i->second << '\n';
+ }
+ }
+
+ /* ----------------------------------------- */
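
BackgroundOperation is an RAII registration: the constructor marks the namespace (and its database) busy, the destructor clears it even on an exception, and destructive operations assert emptiness first. A hedged usage sketch, using the BackgroundIndexBuildJob subclass added later in this commit:

{
    BackgroundIndexBuildJob j( ns );   // ctor: dbsInProg[db]++, nsInProg.insert(ns)
    j.go( ns, d, idx, idxNo );         // long-running build
}                                      // dtor: decrement/erase, even if go() throws

dropDatabase() and dropCollection() call assertNoBgOpInProgForDb()/ForNs(), so they cannot tear structures out from under a running build.
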
+
string dbpath = "/data/db/";
+ bool directoryperdb = false;
+ string repairpath;
DataFileMgr theDataFileMgr;
DatabaseHolder dbHolder;
@@ -53,7 +107,8 @@ namespace mongo {
extern int otherTraceLevel;
void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0);
void ensureIdIndexForNewNs(const char *ns) {
- if ( !strstr( ns, ".system." ) && !strstr( ns, ".$freelist" ) ) {
+ if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
+ strstr( ns, ".$freelist" ) == 0 ){
log( 1 ) << "adding _id index for new collection" << endl;
ensureHaveIdIndex( ns );
}
@@ -63,10 +118,13 @@ namespace mongo {
stringstream ss;
Client * c = currentClient.get();
if ( c ){
- Database *database = c->database();
- if ( database ) {
- ss << database->name << ' ';
- ss << cc().ns() << ' ';
+ Client::Context * cx = c->getContext();
+ if ( cx ){
+ Database *database = cx->db();
+ if ( database ) {
+ ss << database->name << ' ';
+ ss << cx->ns() << ' ';
+ }
}
}
return ss.str();
@@ -105,7 +163,7 @@ namespace mongo {
addNewNamespaceToCatalog(ns, j.isEmpty() ? 0 : &j);
long long size = initialExtentSize(128);
- BSONElement e = j.findElement("size");
+ BSONElement e = j.getField("size");
if ( e.isNumber() ) {
size = (long long) e.number();
size += 256;
@@ -116,10 +174,10 @@ namespace mongo {
bool newCapped = false;
int mx = 0;
- e = j.findElement("capped");
+ e = j.getField("capped");
if ( e.type() == Bool && e.boolean() ) {
newCapped = true;
- e = j.findElement("max");
+ e = j.getField("max");
if ( e.isNumber() ) {
mx = (int) e.number();
}
@@ -127,7 +185,7 @@ namespace mongo {
// $nExtents just for debug/testing. We create '$nExtents' extents,
// each of size 'size'.
- e = j.findElement( "$nExtents" );
+ e = j.getField( "$nExtents" );
int nExtents = int( e.number() );
Database *database = cc().database();
if ( nExtents > 0 ) {
@@ -487,13 +545,11 @@ namespace mongo {
/*---------------------------------------------------------------------*/
auto_ptr<Cursor> DataFileMgr::findAll(const char *ns, const DiskLoc &startLoc) {
- DiskLoc loc;
- bool found = nsindex(ns)->find(ns, loc);
- if ( !found ) {
- // out() << "info: findAll() namespace does not exist: " << ns << endl;
+ NamespaceDetails * d = nsdetails( ns );
+ if ( ! d )
return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
- }
+ DiskLoc loc = d->firstExtent;
Extent *e = getExtent(loc);
DEBUGGING {
@@ -512,40 +568,42 @@ namespace mongo {
}
out() << endl;
- nsdetails(ns)->dumpDeleted(&extents);
+ d->dumpDeleted(&extents);
}
- if ( !nsdetails( ns )->capped ) {
- if ( !startLoc.isNull() )
- return auto_ptr<Cursor>(new BasicCursor( startLoc ));
- while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
- /* todo: if extent is empty, free it for reuse elsewhere.
- that is a bit complicated have to clean up the freelists.
- */
- RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
- // find a nonempty extent
- // it might be nice to free the whole extent here! but have to clean up free recs then.
- e = e->getNextExtent();
- }
- return auto_ptr<Cursor>(new BasicCursor( e->firstRecord ));
- } else {
- return auto_ptr< Cursor >( new ForwardCappedCursor( nsdetails( ns ), startLoc ) );
+ if ( d->capped )
+ return auto_ptr< Cursor >( new ForwardCappedCursor( d , startLoc ) );
+
+ if ( !startLoc.isNull() )
+ return auto_ptr<Cursor>(new BasicCursor( startLoc ));
+
+ while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
+ /* todo: if extent is empty, free it for reuse elsewhere.
+ that is a bit complicated have to clean up the freelists.
+ */
+ RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
+ // find a nonempty extent
+ // it might be nice to free the whole extent here! but have to clean up free recs then.
+ e = e->getNextExtent();
}
+ return auto_ptr<Cursor>(new BasicCursor( e->firstRecord ));
}
/* get a table scan cursor, but can be forward or reverse direction.
order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
*/
auto_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc) {
- BSONElement el = order.findElement("$natural"); // e.g., { $natural : -1 }
+ BSONElement el = order.getField("$natural"); // e.g., { $natural : -1 }
if ( el.number() >= 0 )
return DataFileMgr::findAll(ns, startLoc);
-
+
// "reverse natural order"
NamespaceDetails *d = nsdetails(ns);
+
if ( !d )
return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
+
if ( !d->capped ) {
if ( !startLoc.isNull() )
return auto_ptr<Cursor>(new ReverseCursor( startLoc ));
@@ -583,6 +641,8 @@ namespace mongo {
NamespaceDetails* d = nsdetails(nsToDrop.c_str());
uassert( 10086 , (string)"ns not found: " + nsToDrop , d );
+ BackgroundOperation::assertNoBgOpInProgForNs(nsToDrop.c_str());
+
NamespaceString s(nsToDrop);
assert( s.db == cc().database()->name );
if( s.isSystem() ) {
@@ -634,29 +694,33 @@ namespace mongo {
log(1) << "dropCollection: " << name << endl;
NamespaceDetails *d = nsdetails(name.c_str());
assert( d );
+
+ BackgroundOperation::assertNoBgOpInProgForNs(name.c_str());
+
if ( d->nIndexes != 0 ) {
try {
- assert( deleteIndexes(d, name.c_str(), "*", errmsg, result, true) );
+ assert( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
catch( DBException& ) {
- uasserted(12503,"drop: deleteIndexes for collection failed - consider trying repair");
+ uasserted(12503,"drop: dropIndexes for collection failed - consider trying repair");
}
assert( d->nIndexes == 0 );
}
- log(1) << "\t deleteIndexes done" << endl;
+ log(1) << "\t dropIndexes done" << endl;
result.append("ns", name.c_str());
ClientCursor::invalidate(name.c_str());
+ Top::global.collectionDropped( name );
dropNS(name);
}
int nUnindexes = 0;
- void _unindexRecord(IndexDetails& id, BSONObj& obj, const DiskLoc& dl, bool logMissing = true) {
+ /* unindex all keys in index for this record. */
+ static void _unindexRecord(IndexDetails& id, BSONObj& obj, const DiskLoc& dl, bool logMissing = true) {
BSONObjSetDefaultOrder keys;
id.getKeysFromObject(obj, keys);
for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
BSONObj j = *i;
- // out() << "UNINDEX: j:" << j.toString() << " head:" << id.head.toString() << dl.toString() << endl;
if ( otherTraceLevel >= 5 ) {
out() << "_unindexRecord() " << obj.toString();
out() << "\n unindex:" << j.toString() << endl;
@@ -666,9 +730,9 @@ namespace mongo {
try {
ok = id.head.btree()->unindex(id.head, id, j, dl);
}
- catch (AssertionException&) {
+ catch (AssertionException& e) {
problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
- out() << "Assertion failure: _unindex failed" << '\n';
+ out() << "Assertion failure: _unindex failed: " << e.what() << '\n';
out() << " obj:" << obj.toString() << '\n';
out() << " key:" << j.toString() << '\n';
out() << " dl:" << dl.toString() << endl;
@@ -682,12 +746,14 @@ namespace mongo {
}
/* unindex all keys in all indexes for this record. */
- void unindexRecord(NamespaceDetails *d, Record *todelete, const DiskLoc& dl, bool noWarn = false) {
- if ( d->nIndexes == 0 ) return;
+ static void unindexRecord(NamespaceDetails *d, Record *todelete, const DiskLoc& dl, bool noWarn = false) {
BSONObj obj(todelete);
- NamespaceDetails::IndexIterator i = d->ii();
- while( i.more() ) {
- _unindexRecord(i.next(), obj, dl, !noWarn);
+ int n = d->nIndexes;
+ for ( int i = 0; i < n; i++ )
+ _unindexRecord(d->idx(i), obj, dl, !noWarn);
+ if( d->backgroundIndexBuildInProgress ) {
+ // always pass nowarn here, as this one may be missing for valid reasons as we are concurrently building it
+ _unindexRecord(d->idx(n), obj, dl, false);
}
}
@@ -763,19 +829,20 @@ namespace mongo {
/** Note: if the object shrinks a lot, we don't free up space, we leave extra at end of the record.
*/
- const DiskLoc DataFileMgr::update(const char *ns,
- Record *toupdate, const DiskLoc& dl,
- const char *_buf, int _len, OpDebug& debug)
+ const DiskLoc DataFileMgr::updateRecord(
+ const char *ns,
+ NamespaceDetails *d,
+ NamespaceDetailsTransient *nsdt,
+ Record *toupdate, const DiskLoc& dl,
+ const char *_buf, int _len, OpDebug& debug)
{
StringBuilder& ss = debug.str;
dassert( toupdate == dl.rec() );
- NamespaceDetails *d = nsdetails(ns);
-
BSONObj objOld(toupdate);
BSONObj objNew(_buf);
- assert( objNew.objsize() == _len );
- assert( objNew.objdata() == _buf );
+ DEV assert( objNew.objsize() == _len );
+ DEV assert( objNew.objdata() == _buf );
if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
/* add back the old _id value if the update removes it. Note this implementation is slow
@@ -795,7 +862,7 @@ namespace mongo {
*/
vector<IndexChanges> changes;
getIndexChanges(changes, *d, objNew, objOld);
- dupCheck(changes, *d);
+ dupCheck(changes, *d, dl);
if ( toupdate->netLength() < objNew.objsize() ) {
// doesn't fit. reallocate -----------------------------------------------------
@@ -807,13 +874,14 @@ namespace mongo {
return insert(ns, objNew.objdata(), objNew.objsize(), false);
}
- NamespaceDetailsTransient::get_w( ns ).notifyOfWriteOp();
+ nsdt->notifyOfWriteOp();
d->paddingFits();
/* have any index keys changed? */
{
unsigned keyUpdates = 0;
- for ( int x = 0; x < d->nIndexes; x++ ) {
+ int z = d->nIndexesBeingBuilt();
+ for ( int x = 0; x < z; x++ ) {
IndexDetails& idx = d->idx(x);
for ( unsigned i = 0; i < changes[x].removed.size(); i++ ) {
try {
@@ -859,10 +927,8 @@ namespace mongo {
return sz;
}
- int deb=0;
-
- /* add keys to indexes for a new record */
- inline void _indexRecord(NamespaceDetails *d, int idxNo, BSONObj& obj, DiskLoc newRecordLoc, bool dupsAllowed) {
+ /* add keys to index idxNo for a new record */
+ static inline void _indexRecord(NamespaceDetails *d, int idxNo, BSONObj& obj, DiskLoc recordLoc, bool dupsAllowed) {
IndexDetails& idx = d->idx(idxNo);
BSONObjSetDefaultOrder keys;
idx.getKeysFromObject(obj, keys);
@@ -872,12 +938,16 @@ namespace mongo {
if( ++n == 2 ) {
d->setIndexIsMultikey(idxNo);
}
- assert( !newRecordLoc.isNull() );
+ assert( !recordLoc.isNull() );
try {
- idx.head.btree()->bt_insert(idx.head, newRecordLoc,
+ idx.head.btree()->bt_insert(idx.head, recordLoc,
*i, order, dupsAllowed, idx);
}
- catch (AssertionException& ) {
+ catch (AssertionException& e) {
+ if( e.code == 10287 && idxNo == d->nIndexes ) {
+ DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
+ continue;
+ }
if( !dupsAllowed ) {
// dup key exception, presumably.
throw;
@@ -913,10 +983,10 @@ namespace mongo {
}
// throws DBException
- /* _ TODO dropDups
- */
unsigned long long fastBuildIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
- // testSorting();
+ assert( d->backgroundIndexBuildInProgress == 0 );
+ CurOp * op = cc().curop();
+
Timer t;
log() << "Buildindex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;
@@ -926,13 +996,16 @@ namespace mongo {
BSONObj order = idx.keyPattern();
idx.head.Null();
+
+ if ( logLevel > 1 ) printMemInfo( "before index start" );
/* get and sort all the keys ----- */
unsigned long long n = 0;
auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
BSONObjExternalSorter sorter(order);
+ sorter.hintNumObjects( d->nrecords );
unsigned long long nkeys = 0;
- ProgressMeter pm( d->nrecords , 10 );
+ ProgressMeter & pm = op->setMessage( "index: (1/3) external sort" , d->nrecords , 10 );
while ( c->ok() ) {
BSONObj o = c->current();
DiskLoc loc = c->currLoc();
@@ -947,12 +1020,20 @@ namespace mongo {
sorter.add(*i, loc);
nkeys++;
}
-
+
c->advance();
n++;
pm.hit();
+ if ( logLevel > 1 && n % 10000 == 0 ){
+ printMemInfo( "\t iterating objects" );
+ }
+
};
+ pm.finished();
+
+ if ( logLevel > 1 ) printMemInfo( "before final sort" );
sorter.sort();
+ if ( logLevel > 1 ) printMemInfo( "after final sort" );
log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
@@ -963,21 +1044,23 @@ namespace mongo {
BtreeBuilder btBuilder(dupsAllowed, idx);
BSONObj keyLast;
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
- ProgressMeter pm2( nkeys , 10 );
+ pm = op->setMessage( "index: (2/3) btree bottom up" , nkeys , 10 );
while( i->more() ) {
RARELY killCurrentOp.checkForInterrupt();
BSONObjExternalSorter::Data d = i->next();
- //cout<<"TEMP SORTER next " << d.first.toString() << endl;
try {
btBuilder.addKey(d.first, d.second);
}
- catch( AssertionException& ) {
+ catch( AssertionException& e ) {
if ( dupsAllowed ){
// unknown exception??
throw;
}
+ if( e.interrupted() )
+ throw;
+
if ( ! dropDups )
throw;
@@ -987,8 +1070,11 @@ namespace mongo {
dupsToDrop.push_back(d.second);
uassert( 10092 , "too many dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
}
- pm2.hit();
+ pm.hit();
}
+ pm.finished();
+ op->setMessage( "index: (3/3) btree-middle" );
+ log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
btBuilder.commit();
wassert( btBuilder.getn() == nkeys || dropDups );
}
@@ -1001,32 +1087,61 @@ namespace mongo {
return n;
}
- static class BackgroundIndexBuildJobs {
+ class BackgroundIndexBuildJob : public BackgroundOperation {
unsigned long long addExistingToIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
bool dupsAllowed = !idx.unique();
bool dropDups = idx.dropDups();
+ ProgressMeter& progress = cc().curop()->setMessage( "bg index build" , d->nrecords );
+
unsigned long long n = 0;
- auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- while ( c->ok() ) {
- BSONObj js = c->current();
+ auto_ptr<ClientCursor> cc;
+ {
+ auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ cc.reset( new ClientCursor(c, ns, false) );
+ }
+ CursorId id = cc->cursorid;
+
+ while ( cc->c->ok() ) {
+ BSONObj js = cc->c->current();
try {
- _indexRecord(d, idxNo, js, c->currLoc(),dupsAllowed);
- c->advance();
+ _indexRecord(d, idxNo, js, cc->c->currLoc(), dupsAllowed);
+ cc->c->advance();
} catch( AssertionException& e ) {
+ if( e.interrupted() )
+ throw;
+
if ( dropDups ) {
- DiskLoc toDelete = c->currLoc();
- c->advance();
+ DiskLoc toDelete = cc->c->currLoc();
+ bool ok = cc->c->advance();
+ cc->updateLocation();
theDataFileMgr.deleteRecord( ns, toDelete.rec(), toDelete, false, true );
+ if( ClientCursor::find(id, false) == 0 ) {
+ cc.release();
+ if( !ok ) {
+ /* we were already at the end. normal. */
+ }
+ else {
+ uasserted(12585, "cursor gone during bg index; dropDups");
+ }
+ break;
+ }
} else {
- _log() << endl;
- log(2) << "addExistingToIndex exception " << e.what() << endl;
+ log() << "background addExistingToIndex exception " << e.what() << endl;
throw;
}
}
n++;
- };
+ progress.hit();
+
+ if ( n % 128 == 0 && !cc->yield() ) {
+ cc.release();
+ uasserted(12584, "cursor gone during bg index");
+ break;
+ }
+ }
+ progress.done();
return n;
}
@@ -1034,72 +1149,76 @@ namespace mongo {
that way on a crash/restart, we don't think we are still building one. */
set<NamespaceDetails*> bgJobsInProgress;
- void prep(NamespaceDetails *d) {
+ void prep(const char *ns, NamespaceDetails *d) {
assertInWriteLock();
- assert( bgJobsInProgress.count(d) == 0 );
bgJobsInProgress.insert(d);
d->backgroundIndexBuildInProgress = 1;
+ d->nIndexes--;
}
-
- public:
- /* Note you cannot even do a foreground index build if a background is in progress,
- as bg build assumes it is the last index in the array!
- */
- void checkInProg(NamespaceDetails *d) {
+ void done(const char *ns, NamespaceDetails *d) {
+ d->nIndexes++;
+ d->backgroundIndexBuildInProgress = 0;
+ NamespaceDetailsTransient::get_w(ns).addedIndex(); // clear query optimizer cache
assertInWriteLock();
- uassert(12580, "already building an index for this namespace in background", bgJobsInProgress.count(d) == 0);
}
-/* todo: clean bg flag on loading of NamespaceDetails */
+ public:
+ BackgroundIndexBuildJob(const char *ns) : BackgroundOperation(ns) { }
unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
- unsigned long long n;
- prep(d);
+ unsigned long long n = 0;
+
+ prep(ns.c_str(), d);
+ assert( idxNo == d->nIndexes );
try {
idx.head = BtreeBucket::addBucket(idx);
n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
}
catch(...) {
- assertInWriteLock();
- bgJobsInProgress.erase(d);
- d->backgroundIndexBuildInProgress = 0;
+ if( cc().database() && nsdetails(ns.c_str()) == d ) {
+ assert( idxNo == d->nIndexes );
+ done(ns.c_str(), d);
+ }
+ else {
+ log() << "ERROR: db gone during bg index?" << endl;
+ }
throw;
}
+ assert( idxNo == d->nIndexes );
+ done(ns.c_str(), d);
return n;
}
- } backgroundIndex;
+ };
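
The prep()/done() pair above hides the half-built index from readers: during the build it occupies slot nIndexes (decremented by prep), so code that loops i < nIndexes never consults it, while writers keep it current by looping to nIndexesBeingBuilt() = nIndexes + backgroundIndexBuildInProgress:

int n = d->nIndexesBeingBuilt();
for ( int i = 0; i < n; i++ )          // writers: maintain the in-progress index too
    _indexRecord( d, i, obj, loc, !d->idx(i).unique() );
// query planning and reads stop at nIndexes, so the incomplete btree is never
// used to answer queries.
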
// throws DBException
- static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
- log() << "building new index on " << idx.keyPattern() << " for " << ns << "..." << endl;
+ static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
+ log() << "building new index on " << idx.keyPattern() << " for " << ns << endl;
Timer t;
unsigned long long n;
- BSONObj info = idx.info.obj();
- bool background = info["background"].trueValue();
- if( background ) {
- log() << "WARNING: background index build not yet implemented" << endl;
+ if( background ) {
+ log(2) << "buildAnIndex: background=true\n";
}
+ assert( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
if( !background ) {
n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
assert( !idx.head.isNull() );
}
else {
- n = backgroundIndex.go(ns, d, idx, idxNo);
+ BackgroundIndexBuildJob j(ns.c_str());
+ n = j.go(ns, d, idx, idxNo);
}
log() << "done for " << n << " records " << t.millis() / 1000.0 << "secs" << endl;
}
/* add keys to indexes for a new record */
- void indexRecord(NamespaceDetails *d, const void *buf, int len, DiskLoc newRecordLoc) {
- BSONObj obj((const char *)buf);
-
- /*UNIQUE*/
- for ( int i = 0; i < d->nIndexes; i++ ) {
+ static void indexRecord(NamespaceDetails *d, BSONObj obj, DiskLoc loc) {
+ int n = d->nIndexesBeingBuilt();
+ for ( int i = 0; i < n; i++ ) {
try {
bool unique = d->idx(i).unique();
- _indexRecord(d, i, obj, newRecordLoc, /*dupsAllowed*/!unique);
+ _indexRecord(d, i, obj, loc, /*dupsAllowed*/!unique);
}
catch( DBException& ) {
/* try to roll back previously added index entries
@@ -1108,7 +1227,7 @@ namespace mongo {
*/
for( int j = 0; j <= i; j++ ) {
try {
- _unindexRecord(d->idx(j), obj, newRecordLoc, false);
+ _unindexRecord(d->idx(j), obj, loc, false);
}
catch(...) {
log(3) << "unindex fails on rollback after unique failure\n";
@@ -1119,7 +1238,7 @@ namespace mongo {
}
}
- extern BSONObj id_obj; // { _id : ObjectId("000000000000000000000000") }
+ extern BSONObj id_obj; // { _id : 1 }
void ensureHaveIdIndex(const char *ns) {
NamespaceDetails *d = nsdetails(ns);
@@ -1179,12 +1298,31 @@ namespace mongo {
bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection);
+ // We are now doing two btree scans for all unique indexes (one here, and one when we've
+ // written the record to the collection. This could be made more efficient inserting
+ // dummy data here, keeping pointers to the btree nodes holding the dummy data and then
+ // updating the dummy data with the DiskLoc of the real record.
+ void checkNoIndexConflicts( NamespaceDetails *d, const BSONObj &obj ) {
+ for ( int idxNo = 0; idxNo < d->nIndexes; idxNo++ ) {
+ if( d->idx(idxNo).unique() ) {
+ IndexDetails& idx = d->idx(idxNo);
+ BSONObjSetDefaultOrder keys;
+ idx.getKeysFromObject(obj, keys);
+ BSONObj order = idx.keyPattern();
+ for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ uassert( 12582, "duplicate key insert for unique index of capped collection",
+ idx.head.btree()->findSingle(idx, idx.head, *i ).isNull() );
+ }
+ }
+ }
+ }
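
The rationale: on a capped collection, d->alloc() may delete old records to make room before the index insert runs, and a dup-key failure after that point cannot be rolled back cleanly (see massert 12583 below). Probing every unique index first, i.e.

uassert( 12582, "duplicate key insert for unique index of capped collection",
         idx.head.btree()->findSingle( idx, idx.head, *i ).isNull() );

turns the failure into an up-front error while the collection is still untouched, at the cost of the second btree descent the comment above describes.
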
+
/* note: if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
*/
DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, const BSONElement &writeId, bool mayAddIndex) {
bool wouldAddIndex = false;
- uassert( 10093 , "cannot insert into reserved $ collection", god || strchr(ns, '$') == 0 );
+ massert( 10093 , "cannot insert into reserved $ collection", god || strchr(ns, '$') == 0 );
uassert( 10094 , "invalid ns", strchr( ns , '.' ) > 0 );
const char *sys = strstr(ns, "system.");
if ( sys ) {
@@ -1212,8 +1350,8 @@ namespace mongo {
/* todo: shouldn't be in the namespace catalog until after the allocations here work.
also if this is an addIndex, those checks should happen before this!
*/
- // This creates first file in the database.
- cc().database()->newestFile()->createExtent(ns, initialExtentSize(len));
+ // This may create first file in the database.
+ cc().database()->allocExtent(ns, initialExtentSize(len), false);
d = nsdetails(ns);
if ( !god )
ensureIdIndexForNewNs(ns);
@@ -1225,10 +1363,8 @@ namespace mongo {
string tabletoidxns;
if ( addIndex ) {
BSONObj io((const char *) obuf);
- backgroundIndex.checkInProg(d);
- if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex) ) {
+ if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex) )
return DiskLoc();
- }
}
const BSONElement *newId = &writeId;
@@ -1262,6 +1398,13 @@ namespace mongo {
d->paddingFactor = 1.0;
lenWHdr = len + Record::HeaderSize;
}
+
+ // If the collection is capped, check if the new object will violate a unique index
+ // constraint before allocating space.
+ if ( d->nIndexes && d->capped && !god ) {
+ checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
+ }
+
DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
if ( loc.isNull() ) {
// out of space
@@ -1321,27 +1464,35 @@ namespace mongo {
NamespaceDetailsTransient::get_w( ns ).notifyOfWriteOp();
if ( tableToIndex ) {
+ BSONObj info = loc.obj();
+ bool background = info["background"].trueValue();
+
int idxNo = tableToIndex->nIndexes;
- IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str()); // clear transient info caches so they refresh; increments nIndexes
+ IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
idx.info = loc;
try {
- buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo);
+ buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
} catch( DBException& ) {
- // save our error msg string as an exception on deleteIndexes will overwrite our message
+ // save our error msg string as an exception or dropIndexes will overwrite our message
LastError *le = lastError.get();
- assert( le );
- string saveerrmsg = le->msg;
- assert( !saveerrmsg.empty() );
+ int savecode = 0;
+ string saveerrmsg;
+ if ( le ) {
+ savecode = le->code;
+ saveerrmsg = le->msg;
+ }
// roll back this index
string name = idx.indexName();
BSONObjBuilder b;
string errmsg;
- bool ok = deleteIndexes(tableToIndex, tabletoidxns.c_str(), name.c_str(), errmsg, b, true);
+ bool ok = dropIndexes(tableToIndex, tabletoidxns.c_str(), name.c_str(), errmsg, b, true);
if( !ok ) {
log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
}
- raiseError(12506,saveerrmsg.c_str());
+
+ assert( le && !saveerrmsg.empty() );
+ raiseError(savecode,saveerrmsg.c_str());
throw;
}
}
@@ -1349,11 +1500,13 @@ namespace mongo {
/* add this record to our indexes */
if ( d->nIndexes ) {
try {
- indexRecord(d, r->data/*buf*/, len, loc);
+ BSONObj obj(r->data);
+ indexRecord(d, obj, loc);
}
catch( AssertionException& e ) {
// should be a dup key error on _id index
- if( tableToIndex || d->capped ) {
+ if( tableToIndex || d->capped ) {
+ massert( 12583, "unexpected index insertion failure on capped collection", !d->capped );
string s = e.toString();
s += " : on addIndex/capped - collection and its index will not match";
uassert_nothrow(s.c_str());
@@ -1406,19 +1559,6 @@ namespace mongo {
return r;
}
- void DataFileMgr::init(const string& path ) {
- /* boost::filesystem::path path( dir );
- path /= "temp.dat";
- string pathString = path.string();
- temp.open(pathString.c_str(), 64 * 1024 * 1024);
- */
- }
-
- void pdfileInit() {
- // namespaceIndex.init(dbpath);
- theDataFileMgr.init(dbpath);
- }
-
} // namespace mongo
#include "clientcursor.h"
@@ -1427,63 +1567,75 @@ namespace mongo {
void dropDatabase(const char *ns) {
// ns is of the form "<dbname>.$cmd"
- char cl[256];
- nsToDatabase(ns, cl);
- log(1) << "dropDatabase " << cl << endl;
- assert( cc().database()->name == cl );
+ char db[256];
+ nsToDatabase(ns, db);
+ log(1) << "dropDatabase " << db << endl;
+ assert( cc().database()->name == db );
+
+ BackgroundOperation::assertNoBgOpInProgForDb(db);
- closeDatabase( cl );
- _deleteDataFiles(cl);
+ closeDatabase( db );
+ _deleteDataFiles(db);
}
typedef boost::filesystem::path Path;
// back up original database files to 'temp' dir
void _renameForBackup( const char *database, const Path &reservedPath ) {
+ Path newPath( reservedPath );
+ if ( directoryperdb )
+ newPath /= database;
class Renamer : public FileOp {
public:
- Renamer( const Path &reservedPath ) : reservedPath_( reservedPath ) {}
+ Renamer( const Path &newPath ) : newPath_( newPath ) {}
private:
- const boost::filesystem::path &reservedPath_;
+ const boost::filesystem::path &newPath_;
virtual bool apply( const Path &p ) {
if ( !boost::filesystem::exists( p ) )
return false;
- boost::filesystem::rename( p, reservedPath_ / ( p.leaf() + ".bak" ) );
+ boost::filesystem::rename( p, newPath_ / ( p.leaf() + ".bak" ) );
return true;
}
virtual const char * op() const {
return "renaming";
}
- } renamer( reservedPath );
+ } renamer( newPath );
_applyOpToDataFiles( database, renamer, true );
}
// move temp files to standard data dir
void _replaceWithRecovered( const char *database, const char *reservedPathString ) {
- class : public FileOp {
+ Path newPath( dbpath );
+ if ( directoryperdb )
+ newPath /= database;
+ class Replacer : public FileOp {
+ public:
+ Replacer( const Path &newPath ) : newPath_( newPath ) {}
+ private:
+ const boost::filesystem::path &newPath_;
virtual bool apply( const Path &p ) {
if ( !boost::filesystem::exists( p ) )
return false;
- boost::filesystem::rename( p, boost::filesystem::path(dbpath) / p.leaf() );
+ boost::filesystem::rename( p, newPath_ / p.leaf() );
return true;
}
virtual const char * op() const {
return "renaming";
}
- } renamer;
- _applyOpToDataFiles( database, renamer, true, reservedPathString );
+ } replacer( newPath );
+ _applyOpToDataFiles( database, replacer, true, reservedPathString );
}
// generate a directory name for storing temp data files
Path uniqueReservedPath( const char *prefix ) {
- Path dbPath = Path( dbpath );
+ Path repairPath = Path( repairpath );
Path reservedPath;
int i = 0;
bool exists = false;
do {
stringstream ss;
ss << prefix << "_repairDatabase_" << i++;
- reservedPath = dbPath / ss.str();
+ reservedPath = repairPath / ss.str();
BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
} while ( exists );
return reservedPath;
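// [editor's illustration] With prefix "$tmp" this yields repairpath/$tmp_repairDatabase_0,
// then _1, _2, ... until a name that does not yet exist is found. The same pattern,
// self-contained:
//
//     boost::filesystem::path uniqueDir( const boost::filesystem::path &root , const char *prefix ) {
//         int i = 0;
//         boost::filesystem::path candidate;
//         do {
//             std::stringstream ss;
//             ss << prefix << "_repairDatabase_" << i++;
//             candidate = root / ss.str();
//         } while ( boost::filesystem::exists( candidate ) );
//         return candidate;
//     }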
@@ -1540,6 +1692,8 @@ namespace mongo {
problem() << "repairDatabase " << dbName << endl;
assert( cc().database()->name == dbName );
+ BackgroundOperation::assertNoBgOpInProgForDb(dbName);
+
boost::intmax_t totalSize = dbSize( dbName );
boost::intmax_t freeSize = freeSpace();
if ( freeSize > -1 && freeSize < totalSize ) {
@@ -1553,14 +1707,19 @@ namespace mongo {
Path reservedPath =
uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
- "backup" : "tmp" );
+ "backup" : "$tmp" );
BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
string reservedPathString = reservedPath.native_directory_string();
- assert( setClient( dbName, reservedPathString.c_str() ) );
-
- bool res = cloneFrom(localhost.c_str(), errmsg, dbName,
- /*logForReplication=*/false, /*slaveok*/false, /*replauth*/false, /*snapshot*/false);
- closeDatabase( dbName, reservedPathString.c_str() );
+
+ bool res;
+ { // clone to temp location, which effectively does repair
+ Client::Context ctx( dbName, reservedPathString );
+ assert( ctx.justCreated() );
+
+ res = cloneFrom(localhost.c_str(), errmsg, dbName,
+ /*logForReplication=*/false, /*slaveok*/false, /*replauth*/false, /*snapshot*/false);
+ closeDatabase( dbName, reservedPathString.c_str() );
+ }
if ( !res ) {
problem() << "clone failed for " << dbName << " with error: " << errmsg << endl;
@@ -1569,13 +1728,15 @@ namespace mongo {
return false;
}
- assert( !setClient( dbName ) );
+ Client::Context ctx( dbName );
closeDatabase( dbName );
- if ( backupOriginalFiles )
+ if ( backupOriginalFiles ) {
_renameForBackup( dbName, reservedPath );
- else
+ } else {
_deleteDataFiles( dbName );
+ BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( Path( dbpath ) / dbName ) );
+ }
_replaceWithRecovered( dbName, reservedPathString.c_str() );
@@ -1591,6 +1752,8 @@ namespace mongo {
string c = database;
c += '.';
boost::filesystem::path p(path);
+ if ( directoryperdb )
+ p /= database;
boost::filesystem::path q;
q = p / (c+"ns");
bool ok = false;
@@ -1619,8 +1782,8 @@ namespace mongo {
NamespaceDetails* nsdetails_notinline(const char *ns) { return nsdetails(ns); }
- bool DatabaseHolder::closeAll( const string& path , BSONObjBuilder& result ){
- log(2) << "DatabaseHolder::closeAll path:" << path << endl;
+ bool DatabaseHolder::closeAll( const string& path , BSONObjBuilder& result , bool force ){
+ log() << "DatabaseHolder::closeAll path:" << path << endl;
dbMutex.assertWriteLocked();
map<string,Database*>& m = _paths[path];
@@ -1633,14 +1796,23 @@ namespace mongo {
BSONObjBuilder bb( result.subarrayStart( "dbs" ) );
int n = 0;
+ int nNotClosed = 0;
for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) {
string name = *i;
log(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl;
- setClient( name.c_str() , path );
- closeDatabase( name.c_str() , path );
- bb.append( bb.numStr( n++ ).c_str() , name );
+ Client::Context ctx( name , path );
+ if( !force && BackgroundOperation::inProgForDb(name.c_str()) ) {
+ log() << "WARNING: can't close database " << name << " because a bg job is in progress - try killOp command" << endl;
+ nNotClosed++;
+ }
+ else {
+ closeDatabase( name.c_str() , path );
+ bb.append( bb.numStr( n++ ).c_str() , name );
+ }
}
bb.done();
+ if( nNotClosed )
+ result.append("nNotClosed", nNotClosed);
return true;
}
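// [editor's note] Sample result shape, assuming "admin" closed cleanly while "test"
// had a background index build: { dbs: [ "admin" ], nNotClosed: 1 }. The caller can
// kill the blocking operation (killOp) or retry with force=true to close the rest.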
diff --git a/db/pdfile.h b/db/pdfile.h
index 19a8322..85dc191 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -27,7 +27,7 @@
#include "../stdafx.h"
#include "../util/mmap.h"
-#include "storage.h"
+#include "diskloc.h"
#include "jsobjmanipulator.h"
#include "namespace.h"
#include "client.h"
@@ -98,8 +98,10 @@ namespace mongo {
static Extent* allocFromFreeList(const char *ns, int approxSize, bool capped = false);
/** @return DiskLoc where item ends up */
- const DiskLoc update(
+ const DiskLoc updateRecord(
const char *ns,
+ NamespaceDetails *d,
+ NamespaceDetailsTransient *nsdt,
Record *toupdate, const DiskLoc& dl,
const char *buf, int len, OpDebug& debug);
// The object o may be updated if modified on insert.
@@ -392,6 +394,10 @@ namespace mongo {
void _applyOpToDataFiles( const char *database, FileOp &fo, bool afterAllocator = false, const string& path = dbpath );
inline void _deleteDataFiles(const char *database) {
+ if ( directoryperdb ) {
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( boost::filesystem::path( dbpath ) / database ) );
+ return;
+ }
class : public FileOp {
virtual bool apply( const boost::filesystem::path &p ) {
return boost::filesystem::remove( p );
@@ -443,6 +449,6 @@ namespace mongo {
void ensureHaveIdIndex(const char *ns);
- bool deleteIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
+ bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
} // namespace mongo
diff --git a/db/query.cpp b/db/query.cpp
index 9c82609..761a312 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -55,11 +55,11 @@ namespace mongo {
justOne_( justOne ),
count_(),
bestCount_( bestCount ),
- nScanned_() {
+ _nscanned() {
}
virtual void init() {
c_ = qp().newCursor();
- matcher_.reset( new CoveredIndexMatcher( qp().query(), qp().indexKey() ) );
+ _matcher.reset( new CoveredIndexMatcher( qp().query(), qp().indexKey() ) );
}
virtual void next() {
if ( !c_->ok() ) {
@@ -69,20 +69,20 @@ namespace mongo {
DiskLoc rloc = c_->currLoc();
- if ( matcher_->matches(c_->currKey(), rloc ) ) {
+ if ( _matcher->matches(c_->currKey(), rloc ) ) {
if ( !c_->getsetdup(rloc) )
++count_;
}
c_->advance();
- ++nScanned_;
+ ++_nscanned;
if ( count_ > bestCount_ )
bestCount_ = count_;
if ( count_ > 0 ) {
if ( justOne_ )
setComplete();
- else if ( nScanned_ >= 100 && count_ == bestCount_ )
+ else if ( _nscanned >= 100 && count_ == bestCount_ )
setComplete();
}
}
@@ -95,16 +95,17 @@ namespace mongo {
bool justOne_;
int count_;
int &bestCount_;
- long long nScanned_;
+ long long _nscanned;
auto_ptr< Cursor > c_;
- auto_ptr< CoveredIndexMatcher > matcher_;
+ auto_ptr< CoveredIndexMatcher > _matcher;
};
/* ns: namespace, e.g. <database>.<collection>
pattern: the "where" clause / criteria
justOne: stop after 1 match
+ god: allow access to system namespaces, and don't yield
*/
- int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop, bool god) {
+ long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop, bool god) {
if( !god ) {
if ( strstr(ns, ".system.") ) {
/* note a delete from system.indexes would corrupt the db
@@ -124,7 +125,7 @@ namespace mongo {
return 0;
uassert( 10101 , "can't remove from a capped collection" , ! d->capped );
- int nDeleted = 0;
+ long long nDeleted = 0;
QueryPlanSet s( ns, pattern, BSONObj() );
int best = 0;
DeleteOp original( justOne, best );
@@ -136,18 +137,14 @@ namespace mongo {
CoveredIndexMatcher matcher(pattern, creal->indexKeyPattern());
- auto_ptr<ClientCursor> cc;
- cc.reset( new ClientCursor() );
- cc->c = creal;
- cc->ns = ns;
- cc->noTimeout();
+ auto_ptr<ClientCursor> cc( new ClientCursor(creal, ns, false) );
cc->setDoingDeletes( true );
CursorId id = cc->cursorid;
unsigned long long nScanned = 0;
do {
- if ( ++nScanned % 128 == 0 && !matcher.docMatcher().atomic() ) {
+ if ( ++nScanned % 128 == 0 && !god && !matcher.docMatcher().atomic() ) {
if ( ! cc->yield() ){
cc.release(); // has already been deleted elsewhere
break;
@@ -233,32 +230,9 @@ namespace mongo {
log( k == n ) << "killcursors: found " << k << " of " << n << '\n';
}
- BSONObj id_obj = fromjson("{\"_id\":ObjectId( \"000000000000000000000000\" )}");
+ BSONObj id_obj = fromjson("{\"_id\":1}");
BSONObj empty_obj = fromjson("{}");
- /* This is for languages whose "objects" are not well ordered (JSON is well ordered).
- [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
- */
- inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
- /* note: this is slow, but that is ok as order will have very few pieces */
- BSONObjBuilder b;
- char p[2] = "0";
-
- while ( 1 ) {
- BSONObj j = order.getObjectField(p);
- if ( j.isEmpty() )
- break;
- BSONElement e = j.firstElement();
- uassert( 10102 , "bad order array", !e.eoo());
- uassert( 10103 , "bad order array [2]", e.isNumber());
- b.append(e);
- (*p)++;
- uassert( 10104 , "too many ordering elements", *p <= '9');
- }
-
- return b.obj();
- }
-
//int dump = 0;
@@ -328,7 +302,7 @@ namespace mongo {
}
else {
BSONObj js = c->current();
- fillQueryResultFromObj(b, cc->filter.get(), js);
+ fillQueryResultFromObj(b, cc->fields.get(), js);
n++;
if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
(ntoreturn==0 && b.len()>1*1024*1024) ) {
@@ -365,8 +339,8 @@ namespace mongo {
virtual void init() {
query_ = spec_.getObjectField( "query" );
c_ = qp().newCursor();
- matcher_.reset( new CoveredIndexMatcher( query_, c_->indexKeyPattern() ) );
- if ( qp().exactKeyMatch() && ! matcher_->needRecord() ) {
+ _matcher.reset( new CoveredIndexMatcher( query_, c_->indexKeyPattern() ) );
+ if ( qp().exactKeyMatch() && ! _matcher->needRecord() ) {
query_ = qp().simplifiedQuery( qp().indexKey() );
bc_ = dynamic_cast< BtreeCursor* >( c_.get() );
bc_->forgetEndKey();
@@ -398,7 +372,7 @@ namespace mongo {
_gotOne();
}
} else {
- if ( !matcher_->matches(c_->currKey(), c_->currLoc() ) ) {
+ if ( !_matcher->matches(c_->currKey(), c_->currLoc() ) ) {
}
else if( !c_->getsetdup(c_->currLoc()) ) {
_gotOne();
@@ -434,7 +408,7 @@ namespace mongo {
auto_ptr< Cursor > c_;
BSONObj query_;
BtreeCursor *bc_;
- auto_ptr< CoveredIndexMatcher > matcher_;
+ auto_ptr< CoveredIndexMatcher > _matcher;
BSONObj firstMatch_;
};
@@ -479,438 +453,389 @@ namespace mongo {
// Implements database 'query' requests using the query optimizer's QueryOp interface
class UserQueryOp : public QueryOp {
public:
- UserQueryOp( int ntoskip, int ntoreturn, const BSONObj &order, bool wantMore,
- bool explain, FieldMatcher *filter, int queryOptions ) :
- b_( 32768 ),
- ntoskip_( ntoskip ),
- ntoreturn_( ntoreturn ),
- order_( order ),
- wantMore_( wantMore ),
- explain_( explain ),
- filter_( filter ),
- ordering_(),
- nscanned_(),
- queryOptions_( queryOptions ),
- n_(),
- soSize_(),
- saveClientCursor_(),
- findingStart_( (queryOptions & QueryOption_OplogReplay) != 0 ),
- findingStartCursor_()
- {
- uassert( 10105 , "bad skip value in query", ntoskip >= 0);
- }
-
+
+ UserQueryOp( const ParsedQuery& pq ) :
+ //int ntoskip, int ntoreturn, const BSONObj &order, bool wantMore,
+ // bool explain, FieldMatcher *filter, int queryOptions ) :
+ _buf( 32768 ) , // TODO be smarter here
+ _pq( pq ) ,
+ _ntoskip( pq.getSkip() ) ,
+ _nscanned(0), _nscannedObjects(0),
+ _n(0),
+ _inMemSort(false),
+ _saveClientCursor(false),
+ _oplogReplay( pq.hasOption( QueryOption_OplogReplay) )
+ {}
+
virtual void init() {
- b_.skip( sizeof( QueryResult ) );
+ _buf.skip( sizeof( QueryResult ) );
- // findingStart mode is used to find the first operation of interest when
- // we are scanning through a repl log. For efficiency in the common case,
- // where the first operation of interest is closer to the tail than the head,
- // we start from the tail of the log and work backwards until we find the
- // first operation of interest. Then we scan forward from that first operation,
- // actually returning results to the client. During the findingStart phase,
- // we release the db mutex occasionally to avoid blocking the db process for
- // an extended period of time.
- if ( findingStart_ ) {
- // Use a ClientCursor here so we can release db mutex while scanning
- // oplog (can take quite a while with large oplogs).
- findingStartCursor_ = new ClientCursor();
- findingStartCursor_->noTimeout();
- findingStartCursor_->c = qp().newReverseCursor();
- findingStartCursor_->ns = qp().ns();
+ if ( _oplogReplay ) {
+ _findingStartCursor.reset( new FindingStartCursor( qp() ) );
} else {
- c_ = qp().newCursor();
+ _c = qp().newCursor( DiskLoc() , _pq.getNumToReturn() + _pq.getSkip() );
}
-
- matcher_.reset(new CoveredIndexMatcher(qp().query(), qp().indexKey()));
-
+ _matcher.reset(new CoveredIndexMatcher( qp().query() , qp().indexKey()));
+
if ( qp().scanAndOrderRequired() ) {
- ordering_ = true;
- so_.reset( new ScanAndOrder( ntoskip_, ntoreturn_, order_ ) );
- wantMore_ = false;
+ _inMemSort = true;
+ _so.reset( new ScanAndOrder( _pq.getSkip() , _pq.getNumToReturn() , _pq.getOrder() ) );
}
}
+
virtual void next() {
- if ( findingStart_ ) {
- if ( !findingStartCursor_ || !findingStartCursor_->c->ok() ) {
- findingStart_ = false;
- c_ = qp().newCursor();
- } else if ( !matcher_->matches( findingStartCursor_->c->currKey(), findingStartCursor_->c->currLoc() ) ) {
- findingStart_ = false;
- c_ = qp().newCursor( findingStartCursor_->c->currLoc() );
+ if ( _findingStartCursor.get() ) {
+ if ( _findingStartCursor->done() ) {
+ _c = _findingStartCursor->cRelease();
+ _findingStartCursor.reset( 0 );
} else {
- findingStartCursor_->c->advance();
- RARELY {
- CursorId id = findingStartCursor_->cursorid;
- findingStartCursor_->updateLocation();
- {
- dbtemprelease t;
- }
- findingStartCursor_ = ClientCursor::find( id, false );
- }
- return;
+ _findingStartCursor->next();
}
+ return;
}
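// [editor's note] FindingStartCursor packages the strategy the deleted comment block
// described: for an oplog-replay query it scans backwards from the tail (the match is
// usually near the end), releasing the db mutex periodically; done() turns true at the
// first entry of interest, and cRelease() hands back a forward cursor from that point.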
- if ( findingStartCursor_ ) {
- ClientCursor::erase( findingStartCursor_->cursorid );
- findingStartCursor_ = 0;
- }
-
- if ( !c_->ok() ) {
+ if ( !_c->ok() ) {
finish();
return;
}
- bool mayCreateCursor1 = wantMore_ && ntoreturn_ != 1 && useCursors;
+ bool mayCreateCursor1 = _pq.wantMore() && ! _inMemSort && _pq.getNumToReturn() != 1 && useCursors;
if( 0 ) {
- BSONObj js = c_->current();
- cout << "SCANNING " << js << endl;
+ cout << "SCANNING this: " << this << " key: " << _c->currKey() << " obj: " << _c->current() << endl;
}
- nscanned_++;
- if ( !matcher_->matches(c_->currKey(), c_->currLoc() ) ) {
- ;
+ _nscanned++;
+ if ( !_matcher->matches(_c->currKey(), _c->currLoc() , &_details ) ) {
+ // not a match, continue onward
+ if ( _details.loadedObject )
+ _nscannedObjects++;
}
else {
- DiskLoc cl = c_->currLoc();
- if( !c_->getsetdup(cl) ) {
- BSONObj js = c_->current();
+ _nscannedObjects++;
+ DiskLoc cl = _c->currLoc();
+ if( !_c->getsetdup(cl) ) {
// got a match.
+
+ BSONObj js = _pq.returnKey() ? _c->currKey() : _c->current();
assert( js.objsize() >= 0 ); //defensive for segfaults
- if ( ordering_ ) {
+
+ if ( _inMemSort ) {
// note: no cursors for non-indexed, ordered results. results must be fairly small.
- so_->add(js);
+ _so->add(js);
}
- else if ( ntoskip_ > 0 ) {
- ntoskip_--;
- } else {
- if ( explain_ ) {
- n_++;
- if ( n_ >= ntoreturn_ && !wantMore_ ) {
+ else if ( _ntoskip > 0 ) {
+ _ntoskip--;
+ }
+ else {
+ if ( _pq.isExplain() ) {
+ _n++;
+ if ( _n >= _pq.getNumToReturn() && !_pq.wantMore() ) {
// .limit() was used, show just that much.
finish();
return;
}
}
else {
- fillQueryResultFromObj(b_, filter_, js);
- n_++;
- if ( (ntoreturn_>0 && (n_ >= ntoreturn_ || b_.len() > MaxBytesToReturnToClientAtOnce)) ||
- (ntoreturn_==0 && (b_.len()>1*1024*1024 || n_>=101)) ) {
- /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
- is only a size limit. The idea is that on a find() where one doesn't use much results,
- we don't return much, but once getmore kicks in, we start pushing significant quantities.
-
- The n limit (vs. size) is important when someone fetches only one small field from big
- objects, which causes massive scanning server-side.
- */
+ if ( _pq.returnKey() ){
+ BSONObjBuilder bb( _buf );
+ bb.appendKeys( _c->indexKeyPattern() , js );
+ bb.done();
+ }
+ else {
+ fillQueryResultFromObj( _buf , _pq.getFields() , js );
+ }
+ _n++;
+ if ( ! _c->supportGetMore() ){
+ if ( _pq.enough( _n ) || _buf.len() >= MaxBytesToReturnToClientAtOnce ){
+ finish();
+ return;
+ }
+ }
+ else if ( _pq.enoughForFirstBatch( _n , _buf.len() ) ){
/* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
if ( mayCreateCursor1 ) {
- c_->advance();
- if ( c_->ok() ) {
+ _c->advance();
+ if ( _c->ok() ) {
// more...so save a cursor
- saveClientCursor_ = true;
+ _saveClientCursor = true;
}
}
finish();
return;
- }
+ }
}
}
}
}
- c_->advance();
+ _c->advance();
}
+
void finish() {
- if ( explain_ ) {
- n_ = ordering_ ? so_->size() : n_;
- } else if ( ordering_ ) {
- so_->fill(b_, filter_, n_);
- }
- if ( mayCreateCursor2() ) {
- c_->setTailable();
+ if ( _pq.isExplain() ) {
+ _n = _inMemSort ? _so->size() : _n;
+ }
+ else if ( _inMemSort ) {
+ _so->fill( _buf, _pq.getFields() , _n );
}
+
+ if ( _pq.hasOption( QueryOption_CursorTailable ) && _pq.getNumToReturn() != 1 )
+ _c->setTailable();
+
// If the tailing request succeeded.
- if ( c_->tailable() ) {
- saveClientCursor_ = true;
- }
+ if ( _c->tailable() )
+ _saveClientCursor = true;
+
setComplete();
}
- virtual bool mayRecordPlan() const { return ntoreturn_ != 1; }
+
+ virtual bool mayRecordPlan() const { return _pq.getNumToReturn() != 1; }
+
virtual QueryOp *clone() const {
- return new UserQueryOp( ntoskip_, ntoreturn_, order_, wantMore_, explain_, filter_, queryOptions_ );
- }
- BufBuilder &builder() { return b_; }
- bool scanAndOrderRequired() const { return ordering_; }
- auto_ptr< Cursor > cursor() { return c_; }
- auto_ptr< CoveredIndexMatcher > matcher() { return matcher_; }
- int n() const { return n_; }
- long long nscanned() const { return nscanned_; }
- bool saveClientCursor() const { return saveClientCursor_; }
- bool mayCreateCursor2() const { return ( queryOptions_ & QueryOption_CursorTailable ) && ntoreturn_ != 1; }
+ return new UserQueryOp( _pq );
+ }
+
+ BufBuilder &builder() { return _buf; }
+ bool scanAndOrderRequired() const { return _inMemSort; }
+ auto_ptr< Cursor > cursor() { return _c; }
+ auto_ptr< CoveredIndexMatcher > matcher() { return _matcher; }
+ int n() const { return _n; }
+ long long nscanned() const { return _nscanned; }
+ long long nscannedObjects() const { return _nscannedObjects; }
+ bool saveClientCursor() const { return _saveClientCursor; }
+
private:
- BufBuilder b_;
- int ntoskip_;
- int ntoreturn_;
- BSONObj order_;
- bool wantMore_;
- bool explain_;
- FieldMatcher *filter_;
- bool ordering_;
- auto_ptr< Cursor > c_;
- long long nscanned_;
- int queryOptions_;
- auto_ptr< CoveredIndexMatcher > matcher_;
- int n_;
- int soSize_;
- bool saveClientCursor_;
- auto_ptr< ScanAndOrder > so_;
- bool findingStart_;
- ClientCursor * findingStartCursor_;
+ BufBuilder _buf;
+ const ParsedQuery& _pq;
+
+ long long _ntoskip;
+ long long _nscanned;
+ long long _nscannedObjects;
+ int _n; // found so far
+
+ MatchDetails _details;
+
+ bool _inMemSort;
+ auto_ptr< ScanAndOrder > _so;
+
+ auto_ptr< Cursor > _c;
+
+ auto_ptr< CoveredIndexMatcher > _matcher;
+
+ bool _saveClientCursor;
+ bool _oplogReplay;
+ auto_ptr< FindingStartCursor > _findingStartCursor;
};
/* run a query -- includes checking for and running a Command */
auto_ptr< QueryResult > runQuery(Message& m, QueryMessage& q, CurOp& curop ) {
StringBuilder& ss = curop.debug().str;
+ ParsedQuery pq( q );
const char *ns = q.ns;
int ntoskip = q.ntoskip;
- int _ntoreturn = q.ntoreturn;
BSONObj jsobj = q.query;
- auto_ptr< FieldMatcher > filter = q.fields; // what fields to return (unspecified = full object)
int queryOptions = q.queryOptions;
BSONObj snapshotHint;
- Timer t;
if( logLevel >= 2 )
log() << "runQuery: " << ns << jsobj << endl;
long long nscanned = 0;
- bool wantMore = true;
- int ntoreturn = _ntoreturn;
- if ( _ntoreturn < 0 ) {
- /* _ntoreturn greater than zero is simply a hint on how many objects to send back per
- "cursor batch".
- A negative number indicates a hard limit.
- */
- ntoreturn = -_ntoreturn;
- wantMore = false;
- }
- ss << "query " << ns << " ntoreturn:" << ntoreturn;
+ ss << ns << " ntoreturn:" << pq.getNumToReturn();
curop.setQuery(jsobj);
- BufBuilder bb;
BSONObjBuilder cmdResBuf;
long long cursorid = 0;
- bb.skip(sizeof(QueryResult));
-
auto_ptr< QueryResult > qr;
int n = 0;
Client& c = cc();
- /* we assume you are using findOne() for running a cmd... */
- if ( ntoreturn == 1 && runCommands(ns, jsobj, curop, bb, cmdResBuf, false, queryOptions) ) {
- n = 1;
- qr.reset( (QueryResult *) bb.buf() );
- bb.decouple();
- qr->setResultFlagsToOk();
- qr->len = bb.len();
- ss << " reslen:" << bb.len();
- // qr->channel = 0;
- qr->setOperation(opReply);
- qr->cursorId = cursorid;
- qr->startingFrom = 0;
- qr->nReturned = n;
+
+ if ( pq.couldBeCommand() ){
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult));
+
+ if ( runCommands(ns, jsobj, curop, bb, cmdResBuf, false, queryOptions) ) {
+ ss << " command ";
+ curop.markCommand();
+ n = 1;
+ qr.reset( (QueryResult *) bb.buf() );
+ bb.decouple();
+ qr->setResultFlagsToOk();
+ qr->len = bb.len();
+ ss << " reslen:" << bb.len();
+ // qr->channel = 0;
+ qr->setOperation(opReply);
+ qr->cursorId = cursorid;
+ qr->startingFrom = 0;
+ qr->nReturned = n;
+ }
+ return qr;
}
- else {
- /* regular query */
-
- AuthenticationInfo *ai = currentClient.get()->ai;
- uassert( 10106 , "unauthorized", ai->isAuthorized(c.database()->name.c_str()));
-
- /* we allow queries to SimpleSlave's -- but not to the slave (nonmaster) member of a replica pair
- so that queries to a pair are realtime consistent as much as possible. use setSlaveOk() to
- query the nonmaster member of a replica pair.
- */
- uassert( 10107 , "not master", isMaster() || (queryOptions & QueryOption_SlaveOk) || slave == SimpleSlave );
-
- BSONElement hint;
- BSONObj min;
- BSONObj max;
- bool explain = false;
- bool _gotquery = false;
- bool snapshot = false;
- BSONObj query;
- {
- BSONElement e = jsobj.findElement("$query");
- if ( e.eoo() )
- e = jsobj.findElement("query");
- if ( !e.eoo() && (e.type() == Object || e.type() == Array) ) {
- query = e.embeddedObject();
- _gotquery = true;
- }
+
+ // regular query
+
+ mongolock lk(false); // read lock
+ Client::Context ctx( ns , dbpath , &lk );
+
+ /* we allow queries to SimpleSlave's -- but not to the slave (nonmaster) member of a replica pair
+ so that queries to a pair are realtime consistent as much as possible. use setSlaveOk() to
+ query the nonmaster member of a replica pair.
+ */
+ uassert( 10107 , "not master" , isMaster() || pq.hasOption( QueryOption_SlaveOk ) || replSettings.slave == SimpleSlave );
+
+ BSONElement hint = useHints ? pq.getHint() : BSONElement();
+ bool explain = pq.isExplain();
+ bool snapshot = pq.isSnapshot();
+ BSONObj query = pq.getFilter();
+ BSONObj order = pq.getOrder();
+
+ if ( pq.hasOption( QueryOption_CursorTailable ) ) {
+ NamespaceDetails *d = nsdetails( ns );
+ uassert( 13051, "tailable cursor requested on non capped collection", d && d->capped );
+ if ( order.isEmpty() ) {
+ order = BSON( "$natural" << 1 );
+ } else {
+ uassert( 13052, "only {$natural:1} order allowed for tailable cursor", order == BSON( "$natural" << 1 ) );
}
- BSONObj order;
- {
- BSONElement e = jsobj.findElement("$orderby");
- if ( e.eoo() )
- e = jsobj.findElement("orderby");
- if ( !e.eoo() ) {
- order = e.embeddedObjectUserCheck();
- if ( e.type() == Array )
- order = transformOrderFromArrayFormat(order);
+ }
+
+ if( snapshot ) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d ){
+ int i = d->findIdIndex();
+ if( i < 0 ) {
+ if ( strstr( ns , ".system." ) == 0 )
+ log() << "warning: no _id index on $snapshot query, ns:" << ns << endl;
}
- }
- if ( !_gotquery && order.isEmpty() )
- query = jsobj;
- else {
- explain = jsobj.getBoolField("$explain");
- if ( useHints )
- hint = jsobj.getField("$hint");
- min = jsobj.getObjectField("$min");
- max = jsobj.getObjectField("$max");
- BSONElement e = jsobj.getField("$snapshot");
- snapshot = !e.eoo() && e.trueValue();
- if( snapshot ) {
- uassert( 12001 , "E12001 can't sort with $snapshot", order.isEmpty());
- uassert( 12002 , "E12002 can't use hint with $snapshot", hint.eoo());
- NamespaceDetails *d = nsdetails(ns);
- if ( d ){
- int i = d->findIdIndex();
- if( i < 0 ) {
- if ( strstr( ns , ".system." ) == 0 )
- log() << "warning: no _id index on $snapshot query, ns:" << ns << endl;
- }
- else {
- /* [dm] the name of an _id index tends to vary, so we build the hint the hard way here.
- probably need a better way to specify "use the _id index" as a hint. if someone is
- in the query optimizer please fix this then!
- */
- BSONObjBuilder b;
- b.append("$hint", d->idx(i).indexName());
- snapshotHint = b.obj();
- hint = snapshotHint.firstElement();
- }
- }
+ else {
+ /* [dm] the name of an _id index tends to vary, so we build the hint the hard way here.
+ probably need a better way to specify "use the _id index" as a hint. if someone is
+ in the query optimizer please fix this then!
+ */
+ BSONObjBuilder b;
+ b.append("$hint", d->idx(i).indexName());
+ snapshotHint = b.obj();
+ hint = snapshotHint.firstElement();
}
}
+ }
- /* The ElemIter will not be happy if this isn't really an object. So throw exception
- here when that is true.
- (Which may indicate bad data from client.)
- */
- if ( query.objsize() == 0 ) {
- out() << "Bad query object?\n jsobj:";
- out() << jsobj.toString() << "\n query:";
- out() << query.toString() << endl;
- uassert( 10110 , "bad query object", false);
- }
+ /* The ElemIter will not be happy if this isn't really an object. So throw exception
+ here when that is true.
+ (Which may indicate bad data from client.)
+ */
+ if ( query.objsize() == 0 ) {
+ out() << "Bad query object?\n jsobj:";
+ out() << jsobj.toString() << "\n query:";
+ out() << query.toString() << endl;
+ uassert( 10110 , "bad query object", false);
+ }
- bool idHackWorked = false;
- if ( strcmp( query.firstElement().fieldName() , "_id" ) == 0 && query.nFields() == 1 && query.firstElement().isSimpleType() ){
- nscanned = 1;
+ if ( ! explain && isSimpleIdQuery( query ) && !pq.hasOption( QueryOption_CursorTailable ) ) {
+ nscanned = 1;
- bool nsFound = false;
- bool indexFound = false;
+ bool nsFound = false;
+ bool indexFound = false;
- BSONObj resObject;
- bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
- if ( nsFound == false || indexFound == true ){
- idHackWorked = true;
- if ( found ){
- n = 1;
- fillQueryResultFromObj( bb , filter.get() , resObject );
- }
- qr.reset( (QueryResult *) bb.buf() );
- bb.decouple();
- qr->setResultFlagsToOk();
- qr->len = bb.len();
- ss << " reslen:" << bb.len();
- qr->setOperation(opReply);
- qr->cursorId = cursorid;
- qr->startingFrom = 0;
- qr->nReturned = n;
- }
- }
-
- if ( ! idHackWorked ){ // non-simple _id lookup
- BSONObj oldPlan;
- if ( explain && hint.eoo() && min.isEmpty() && max.isEmpty() ) {
- QueryPlanSet qps( ns, query, order );
- if ( qps.usingPrerecordedPlan() )
- oldPlan = qps.explain();
- }
- QueryPlanSet qps( ns, query, order, &hint, !explain, min, max );
- UserQueryOp original( ntoskip, ntoreturn, order, wantMore, explain, filter.get(), queryOptions );
- shared_ptr< UserQueryOp > o = qps.runOp( original );
- UserQueryOp &dqo = *o;
- massert( 10362 , dqo.exceptionMessage(), dqo.complete() );
- n = dqo.n();
- nscanned = dqo.nscanned();
- if ( dqo.scanAndOrderRequired() )
- ss << " scanAndOrder ";
- auto_ptr< Cursor > c = dqo.cursor();
- log( 5 ) << " used cursor: " << c.get() << endl;
- if ( dqo.saveClientCursor() ) {
- ClientCursor *cc = new ClientCursor();
- if ( queryOptions & QueryOption_NoCursorTimeout )
- cc->noTimeout();
- cc->c = c;
- cursorid = cc->cursorid;
- cc->query = jsobj.getOwned();
- DEV out() << " query has more, cursorid: " << cursorid << endl;
- cc->matcher = dqo.matcher();
- cc->ns = ns;
- cc->pos = n;
- cc->filter = filter;
- cc->originalMessage = m;
- cc->updateLocation();
- if ( !cc->c->ok() && cc->c->tailable() ) {
- DEV out() << " query has no more but tailable, cursorid: " << cursorid << endl;
- } else {
- DEV out() << " query has more, cursorid: " << cursorid << endl;
- }
- }
- if ( explain ) {
- BSONObjBuilder builder;
- builder.append("cursor", c->toString());
- builder.append("startKey", c->prettyStartKey());
- builder.append("endKey", c->prettyEndKey());
- builder.append("nscanned", double( dqo.nscanned() ) );
- builder.append("n", n);
- if ( dqo.scanAndOrderRequired() )
- builder.append("scanAndOrder", true);
- builder.append("millis", t.millis());
- if ( !oldPlan.isEmpty() )
- builder.append( "oldPlan", oldPlan.firstElement().embeddedObject().firstElement().embeddedObject() );
- if ( hint.eoo() )
- builder.appendElements(qps.explain());
- BSONObj obj = builder.done();
- fillQueryResultFromObj(dqo.builder(), 0, obj);
+ BSONObj resObject;
+ bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
+ if ( nsFound == false || indexFound == true ){
+ BufBuilder bb(sizeof(QueryResult)+resObject.objsize()+32);
+ bb.skip(sizeof(QueryResult));
+
+ ss << " idhack ";
+ if ( found ){
n = 1;
+ fillQueryResultFromObj( bb , pq.getFields() , resObject );
}
- qr.reset( (QueryResult *) dqo.builder().buf() );
- dqo.builder().decouple();
- qr->cursorId = cursorid;
+ qr.reset( (QueryResult *) bb.buf() );
+ bb.decouple();
qr->setResultFlagsToOk();
- qr->len = dqo.builder().len();
- ss << " reslen:" << qr->len;
+ qr->len = bb.len();
+ ss << " reslen:" << bb.len();
qr->setOperation(opReply);
+ qr->cursorId = cursorid;
qr->startingFrom = 0;
- qr->nReturned = n;
+ qr->nReturned = n;
+ return qr;
+ }
+ }
+
+ // regular, not QO bypass query
+
+ BSONObj oldPlan;
+ if ( explain && ! pq.hasIndexSpecifier() ){
+ QueryPlanSet qps( ns, query, order );
+ if ( qps.usingPrerecordedPlan() )
+ oldPlan = qps.explain();
+ }
+ QueryPlanSet qps( ns, query, order, &hint, !explain, pq.getMin(), pq.getMax() );
+ UserQueryOp original( pq );
+ shared_ptr< UserQueryOp > o = qps.runOp( original );
+ UserQueryOp &dqo = *o;
+ massert( 10362 , dqo.exceptionMessage(), dqo.complete() );
+ n = dqo.n();
+ nscanned = dqo.nscanned();
+ if ( dqo.scanAndOrderRequired() )
+ ss << " scanAndOrder ";
+ auto_ptr<Cursor> cursor = dqo.cursor();
+ log( 5 ) << " used cursor: " << cursor.get() << endl;
+ if ( dqo.saveClientCursor() ) {
+ // the clientcursor now owns the Cursor* and 'cursor' is released:
+ ClientCursor *cc = new ClientCursor(cursor, ns, !(queryOptions & QueryOption_NoCursorTimeout));
+ cursorid = cc->cursorid;
+ cc->query = jsobj.getOwned();
+ DEV out() << " query has more, cursorid: " << cursorid << endl;
+ cc->matcher = dqo.matcher();
+ cc->pos = n;
+ cc->fields = pq.getFieldPtr();
+ cc->originalMessage = m;
+ cc->updateLocation();
+ if ( !cc->c->ok() && cc->c->tailable() ) {
+ DEV out() << " query has no more but tailable, cursorid: " << cursorid << endl;
+ } else {
+ DEV out() << " query has more, cursorid: " << cursorid << endl;
}
}
+ if ( explain ) {
+ BSONObjBuilder builder;
+ builder.append("cursor", cursor->toString());
+ builder.appendArray("indexBounds", cursor->prettyIndexBounds());
+ builder.appendNumber("nscanned", dqo.nscanned() );
+ builder.appendNumber("nscannedObjects", dqo.nscannedObjects() );
+ builder.append("n", n);
+ if ( dqo.scanAndOrderRequired() )
+ builder.append("scanAndOrder", true);
+ builder.append("millis", curop.elapsedMillis());
+ if ( !oldPlan.isEmpty() )
+ builder.append( "oldPlan", oldPlan.firstElement().embeddedObject().firstElement().embeddedObject() );
+ if ( hint.eoo() )
+ builder.appendElements(qps.explain());
+ BSONObj obj = builder.done();
+ fillQueryResultFromObj(dqo.builder(), 0, obj);
+ n = 1;
+ }
+ qr.reset( (QueryResult *) dqo.builder().buf() );
+ dqo.builder().decouple();
+ qr->cursorId = cursorid;
+ qr->setResultFlagsToOk();
+ qr->len = dqo.builder().len();
+ ss << " reslen:" << qr->len;
+ qr->setOperation(opReply);
+ qr->startingFrom = 0;
+ qr->nReturned = n;
+
- int duration = t.millis();
- Database *database = c.database();
- if ( (database && database->profile) || duration >= 100 ) {
+ int duration = curop.elapsedMillis();
+ bool dbprofile = curop.shouldDBProfile( duration );
+ if ( dbprofile || duration >= cmdLine.slowMS ) {
ss << " nscanned:" << nscanned << ' ';
if ( ntoskip )
ss << " ntoskip:" << ntoskip;
- if ( database && database->profile )
+ if ( dbprofile )
ss << " \nquery: ";
ss << jsobj << ' ';
}
diff --git a/db/query.h b/db/query.h
index d69b6d9..fdc33c9 100644
--- a/db/query.h
+++ b/db/query.h
@@ -22,7 +22,7 @@
#include "../util/message.h"
#include "dbmessage.h"
#include "jsobj.h"
-#include "storage.h"
+#include "diskloc.h"
/* db request message format
@@ -71,13 +71,15 @@
namespace mongo {
+ extern const int MaxBytesToReturnToClientAtOnce;
+
// for an existing query (ie a ClientCursor), send back additional information.
QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid , CurOp& op);
struct UpdateResult {
bool existing;
bool mod;
- unsigned long long num;
+ long long num;
UpdateResult( bool e, bool m, unsigned long long n )
: existing(e) , mod(m), num(n ){}
@@ -100,16 +102,213 @@ namespace mongo {
/* returns true if an existing object was updated, false if no existing object was found.
multi - update multiple objects - mostly useful with things like $set
+ god - allow access to system namespaces and don't yield
*/
- UpdateResult updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug );
+ UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug );
// If justOne is true, deletedId is set to the id of the deleted object.
- int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop = false, bool god=false);
+ long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop = false, bool god=false);
long long runCount(const char *ns, const BSONObj& cmd, string& err);
auto_ptr< QueryResult > runQuery(Message& m, QueryMessage& q, CurOp& curop );
+ /* This is for languages whose "objects" are not well ordered (JSON is well ordered).
+ [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ */
+ inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
+ /* note: this is slow, but that is ok as order will have very few pieces */
+ BSONObjBuilder b;
+ char p[2] = "0";
+
+ while ( 1 ) {
+ BSONObj j = order.getObjectField(p);
+ if ( j.isEmpty() )
+ break;
+ BSONElement e = j.firstElement();
+ uassert( 10102 , "bad order array", !e.eoo());
+ uassert( 10103 , "bad order array [2]", e.isNumber());
+ b.append(e);
+ (*p)++;
+ uassert( 10104 , "too many ordering elements", *p <= '9');
+ }
+
+ return b.obj();
+ }
+
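// [editor's example] The transform above in action -- an array-form sort spec
// arrives as an object with numeric field names and comes out flattened:
//
//     BSONObj arrayOrder = BSON( "0" << BSON( "a" << 1 ) << "1" << BSON( "b" << -1 ) );
//     BSONObj flat = transformOrderFromArrayFormat( arrayOrder );
//     assert( flat.woCompare( BSON( "a" << 1 << "b" << -1 ) ) == 0 ); // field order kept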
+ /**
+ * this represents a total user query
+ * includes fields from the query message, both possible query levels
+ * parses everything up front
+ */
+ class ParsedQuery {
+ public:
+ ParsedQuery( QueryMessage& qm )
+ : _ns( qm.ns ) , _ntoskip( qm.ntoskip ) , _ntoreturn( qm.ntoreturn ) , _options( qm.queryOptions ){
+ init( qm.query );
+ initFields( qm.fields );
+ }
+ ParsedQuery( const char* ns , int ntoskip , int ntoreturn , int queryoptions , const BSONObj& query , const BSONObj& fields )
+ : _ns( ns ) , _ntoskip( ntoskip ) , _ntoreturn( ntoreturn ) , _options( queryoptions ){
+ init( query );
+ initFields( fields );
+ }
+
+ ~ParsedQuery(){}
+
+ const char * ns() const { return _ns; }
+
+ const BSONObj& getFilter() const { return _filter; }
+ FieldMatcher* getFields() const { return _fields.get(); }
+ shared_ptr<FieldMatcher> getFieldPtr() const { return _fields; }
+
+ int getSkip() const { return _ntoskip; }
+ int getNumToReturn() const { return _ntoreturn; }
+ bool wantMore() const { return _wantMore; }
+ int getOptions() const { return _options; }
+ bool hasOption( int x ) const { return x & _options; }
+
+
+ bool isExplain() const { return _explain; }
+ bool isSnapshot() const { return _snapshot; }
+ bool returnKey() const { return _returnKey; }
+
+ const BSONObj& getMin() const { return _min; }
+ const BSONObj& getMax() const { return _max; }
+ const BSONObj& getOrder() const { return _order; }
+ const BSONElement& getHint() const { return _hint; }
+
+ bool couldBeCommand() const {
+ /* we assume you are using findOne() for running a cmd... */
+ return _ntoreturn == 1 && strstr( _ns , ".$cmd" );
+ }
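// [editor's example] { count: "foo" } sent findOne-style against "test.$cmd"
// (ntoreturn == 1) is treated as a command; a plain find on "test.foo" with a
// batch size of 1 is not, because the namespace lacks ".$cmd".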
+
+ bool hasIndexSpecifier() const {
+ return ! _hint.eoo() || ! _min.isEmpty() || ! _max.isEmpty();
+ }
+
+ /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
+ is only a size limit. The idea is that on a find() where one doesn't use many results,
+ we don't return much, but once getmore kicks in, we start pushing significant quantities.
+
+ The n limit (vs. size) is important when someone fetches only one small field from big
+ objects, which causes massive scanning server-side.
+ */
+ bool enoughForFirstBatch( int n , int len ) const {
+ if ( _ntoreturn == 0 )
+ return ( len > 1024 * 1024 ) || n >= 101;
+ return n >= _ntoreturn || len > MaxBytesToReturnToClientAtOnce;
+ }
+
+ bool enough( int n ) const {
+ if ( _ntoreturn == 0 )
+ return false;
+ return n >= _ntoreturn;
+ }
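// [editor's worked examples]
//   find() with no limit: _ntoreturn == 0, so the first batch closes at the
//   101st object or past 1 MB, and enough() never ends the cursor -- getmore
//   batches are bounded by size alone.
//   find().limit(-5): arrives as _ntoreturn == -5; init() flips it to 5 with
//   _wantMore == false, so enough(5) is true and no cursor is saved.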
+
+ private:
+ void init( const BSONObj& q ){
+ _reset();
+ uassert( 10105 , "bad skip value in query", _ntoskip >= 0);
+
+ if ( _ntoreturn < 0 ){
+ /* _ntoreturn greater than zero is simply a hint on how many objects to send back per
+ "cursor batch".
+ A negative number indicates a hard limit.
+ */
+ _wantMore = false;
+ _ntoreturn = -_ntoreturn;
+ }
+
+
+ BSONElement e = q["query"];
+ if ( ! e.isABSONObj() )
+ e = q["$query"];
+
+ if ( e.isABSONObj() ){
+ _filter = e.embeddedObject();
+ _initTop( q );
+ }
+ else {
+ _filter = q;
+ }
+ }
+
+ void _reset(){
+ _wantMore = true;
+ _explain = false;
+ _snapshot = false;
+ _returnKey = false;
+ }
+
+ void _initTop( const BSONObj& top ){
+ BSONObjIterator i( top );
+ while ( i.more() ){
+ BSONElement e = i.next();
+ const char * name = e.fieldName();
+
+ if ( strcmp( "$orderby" , name ) == 0 ||
+ strcmp( "orderby" , name ) == 0 ){
+ if ( e.type() == Object )
+ _order = e.embeddedObject();
+ else if ( e.type() == Array )
+ _order = transformOrderFromArrayFormat( e.embeddedObject() );
+ else
+ assert( 0 );
+ }
+ else if ( strcmp( "$explain" , name ) == 0 )
+ _explain = e.trueValue();
+ else if ( strcmp( "$snapshot" , name ) == 0 )
+ _snapshot = e.trueValue();
+ else if ( strcmp( "$min" , name ) == 0 )
+ _min = e.embeddedObject();
+ else if ( strcmp( "$max" , name ) == 0 )
+ _max = e.embeddedObject();
+ else if ( strcmp( "$hint" , name ) == 0 )
+ _hint = e;
+ else if ( strcmp( "$returnKey" , name ) == 0 )
+ _returnKey = e.trueValue();
+
+ }
+
+ if ( _snapshot ){
+ uassert( 12001 , "E12001 can't sort with $snapshot", _order.isEmpty() );
+ uassert( 12002 , "E12002 can't use hint with $snapshot", _hint.eoo() );
+ }
+
+ }
+
+ void initFields( const BSONObj& fields ){
+ if ( fields.isEmpty() )
+ return;
+ _fields.reset( new FieldMatcher() );
+ _fields->add( fields );
+ }
+
+ ParsedQuery( const ParsedQuery& other ){
+ assert(0);
+ }
+
+ const char* _ns;
+ int _ntoskip;
+ int _ntoreturn;
+ int _options;
+
+ BSONObj _filter;
+ shared_ptr< FieldMatcher > _fields;
+
+ bool _wantMore;
+
+ bool _explain;
+ bool _snapshot;
+ bool _returnKey;
+ BSONObj _min;
+ BSONObj _max;
+ BSONElement _hint;
+ BSONObj _order;
+ };
+
+
} // namespace mongo
#include "clientcursor.h"
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 499417a..fa08323 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -24,6 +24,9 @@
#include "queryoptimizer.h"
#include "cmdline.h"
+//#define DEBUGQO(x) cout << x << endl;
+#define DEBUGQO(x)
+
namespace mongo {
void checkTableScanAllowed( const char * ns ){
@@ -39,7 +42,7 @@ namespace mongo {
uassert( 10111 , (string)"table scans not allowed:" + ns , ! cmdLine.notablescan );
}
-
+
double elementDirection( const BSONElement &e ) {
if ( e.isNumber() )
return e.number();
@@ -48,7 +51,7 @@ namespace mongo {
QueryPlan::QueryPlan(
NamespaceDetails *_d, int _idxNo,
- const FieldRangeSet &fbs, const BSONObj &order, const BSONObj &startKey, const BSONObj &endKey ) :
+ const FieldRangeSet &fbs, const BSONObj &order, const BSONObj &startKey, const BSONObj &endKey , string special ) :
d(_d), idxNo(_idxNo),
fbs_( fbs ),
order_( order ),
@@ -58,7 +61,9 @@ namespace mongo {
exactKeyMatch_( false ),
direction_( 0 ),
endKeyInclusive_( endKey.isEmpty() ),
- unhelpful_( false ) {
+ unhelpful_( false ),
+ _special( special ),
+ _type(0){
if ( !fbs_.matchPossible() ) {
unhelpful_ = true;
@@ -75,6 +80,14 @@ namespace mongo {
return;
}
+ if ( _special.size() ){
+ optimal_ = true;
+ _type = index_->getSpec().getType();
+ massert( 13040 , (string)"no type for special: " + _special , _type );
+ scanAndOrderRequired_ = _type->scanAndOrderRequired( fbs.query() , order );
+ return;
+ }
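// [editor's note] "special" flags a query only a typed index can serve; e.g.
// { loc : { $near : [ 50 , 50 ] } } makes FieldRangeSet::getSpecial() return
// "2d" (the geo index type), and the plan is declared optimal outright -- the
// IndexType supplies the cursor rather than ordinary btree bounds.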
+
BSONObj idxKey = index_->keyPattern();
BSONObjIterator o( order );
BSONObjIterator k( idxKey );
@@ -163,7 +176,11 @@ namespace mongo {
unhelpful_ = true;
}
- auto_ptr< Cursor > QueryPlan::newCursor( const DiskLoc &startLoc ) const {
+ auto_ptr< Cursor > QueryPlan::newCursor( const DiskLoc &startLoc , int numWanted ) const {
+
+ if ( _type )
+ return _type->newCursor( fbs_.query() , order_ , numWanted );
+
if ( !fbs_.matchPossible() ){
if ( fbs_.nNontrivialRanges() )
checkTableScanAllowed( fbs_.ns() );
@@ -206,13 +223,14 @@ namespace mongo {
void QueryPlan::registerSelf( long long nScanned ) const {
if ( fbs_.matchPossible() ) {
- boostlock lk(NamespaceDetailsTransient::_qcMutex);
+ scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( fbs_.pattern( order_ ), indexKey(), nScanned );
}
}
QueryPlanSet::QueryPlanSet( const char *_ns, const BSONObj &query, const BSONObj &order, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max ) :
ns(_ns),
+ query_( query.getOwned() ),
fbs_( _ns, query ),
mayRecordPlan_( true ),
usingPrerecordedPlan_( false ),
@@ -223,9 +241,7 @@ namespace mongo {
min_( min.getOwned() ),
max_( max.getOwned() ) {
if ( hint && !hint->eoo() ) {
- BSONObjBuilder b;
- b.append( *hint );
- hint_ = b.obj();
+ hint_ = hint->wrap();
}
init();
}
@@ -242,6 +258,7 @@ namespace mongo {
}
void QueryPlanSet::init() {
+ DEBUGQO( "QueryPlanSet::init " << ns << "\t" << query_ );
plans_.clear();
mayRecordPlan_ = true;
usingPrerecordedPlan_ = false;
@@ -297,9 +314,43 @@ namespace mongo {
plans_.push_back( PlanPtr( new QueryPlan( d, d->idxNo(*idx), fbs_, order_, min_, max_ ) ) );
return;
}
-
+
+ if ( isSimpleIdQuery( query_ ) ){
+ int idx = d->findIdIndex();
+ if ( idx >= 0 ){
+ usingPrerecordedPlan_ = true;
+ mayRecordPlan_ = false;
+ plans_.push_back( PlanPtr( new QueryPlan( d , idx , fbs_ , order_ ) ) );
+ return;
+ }
+ }
+
+ if ( query_.isEmpty() && order_.isEmpty() ){
+ plans_.push_back( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ) );
+ return;
+ }
+
+ DEBUGQO( "\t special : " << fbs_.getSpecial() );
+ if ( fbs_.getSpecial().size() ){
+ _special = fbs_.getSpecial();
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ int j = i.pos();
+ IndexDetails& ii = i.next();
+ const IndexSpec& spec = ii.getSpec();
+ if ( spec.getTypeName() == _special && spec.suitability( query_ , order_ ) ){
+ usingPrerecordedPlan_ = true;
+ mayRecordPlan_ = true;
+ plans_.push_back( PlanPtr( new QueryPlan( d , j , fbs_ , order_ ,
+ BSONObj() , BSONObj() , _special ) ) );
+ return;
+ }
+ }
+ uassert( 13038 , (string)"can't find special index: " + _special + " for: " + query_.toString() , 0 );
+ }
+
if ( honorRecordedPlan_ ) {
- boostlock lk(NamespaceDetailsTransient::_qcMutex);
+ scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
NamespaceDetailsTransient& nsd = NamespaceDetailsTransient::get_inlock( ns );
BSONObj bestIndex = nsd.indexForPattern( fbs_.pattern( order_ ) );
if ( !bestIndex.isEmpty() ) {
@@ -334,7 +385,7 @@ namespace mongo {
if ( !d )
return;
- // If table scan is optimal or natural order requested
+ // If table scan is optimal or natural order requested or tailable cursor requested
if ( !fbs_.matchPossible() || ( fbs_.nNontrivialRanges() == 0 && order_.isEmpty() ) ||
( !order_.isEmpty() && !strcmp( order_.firstElement().fieldName(), "$natural" ) ) ) {
// Table scan plan
@@ -342,8 +393,19 @@ namespace mongo {
return;
}
+ bool normalQuery = hint_.isEmpty() && min_.isEmpty() && max_.isEmpty();
+
PlanSet plans;
for( int i = 0; i < d->nIndexes; ++i ) {
+ IndexDetails& id = d->idx(i);
+ const IndexSpec& spec = id.getSpec();
+ IndexSuitability suitability = HELPFUL;
+ if ( normalQuery ){
+ suitability = spec.suitability( query_ , order_ );
+ if ( suitability == USELESS )
+ continue;
+ }
+
PlanPtr p( new QueryPlan( d, i, fbs_, order_ ) );
if ( p->optimal() ) {
addPlan( p, checkFirst );
@@ -367,7 +429,7 @@ namespace mongo {
if ( res->complete() || plans_.size() > 1 )
return res;
{
- boostlock lk(NamespaceDetailsTransient::_qcMutex);
+ scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
NamespaceDetailsTransient::get_inlock( fbs_.ns() ).registerIndexForPattern( fbs_.pattern( order_ ), BSONObj(), 0 );
}
init();
@@ -380,7 +442,10 @@ namespace mongo {
vector< BSONObj > arr;
for( PlanSet::const_iterator i = plans_.begin(); i != plans_.end(); ++i ) {
auto_ptr< Cursor > c = (*i)->newCursor();
- arr.push_back( BSON( "cursor" << c->toString() << "startKey" << c->prettyStartKey() << "endKey" << c->prettyEndKey() ) );
+ BSONObjBuilder explain;
+ explain.append( "cursor", c->toString() );
+ explain.appendArray( "indexBounds", c->prettyIndexBounds() );
+ arr.push_back( explain.obj() );
}
BSONObjBuilder b;
b.append( "allPlans", arr );
@@ -433,7 +498,7 @@ namespace mongo {
}
if ( errCount == ops.size() )
break;
- if ( plans_.usingPrerecordedPlan_ && nScanned > plans_.oldNScanned_ * 10 ) {
+ if ( plans_.usingPrerecordedPlan_ && nScanned > plans_.oldNScanned_ * 10 && plans_._special.empty() ) {
plans_.addOtherPlans( true );
PlanSet::iterator i = plans_.plans_.begin();
++i;
@@ -558,7 +623,7 @@ namespace mongo {
return 0;
}
- setClient( ns );
+ Client::Context ctx( ns );
IndexDetails *id = 0;
NamespaceDetails *d = nsdetails( ns );
if ( !d ) {
@@ -576,9 +641,11 @@ namespace mongo {
while( i.more() ) {
IndexDetails& ii = i.next();
if ( indexWorks( ii.keyPattern(), min.isEmpty() ? max : min, ret.first, ret.second ) ) {
- id = &ii;
- keyPattern = ii.keyPattern();
- break;
+ if ( ii.getSpec().getType() == 0 ){
+ id = &ii;
+ keyPattern = ii.keyPattern();
+ break;
+ }
}
}
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index e4a79d8..1cb5052 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -25,6 +25,8 @@
namespace mongo {
class IndexDetails;
+ class IndexType;
+
class QueryPlan : boost::noncopyable {
public:
QueryPlan(NamespaceDetails *_d,
@@ -32,7 +34,8 @@ namespace mongo {
const FieldRangeSet &fbs,
const BSONObj &order,
const BSONObj &startKey = BSONObj(),
- const BSONObj &endKey = BSONObj() );
+ const BSONObj &endKey = BSONObj() ,
+ string special="" );
/* If true, no other index can do better. */
bool optimal() const { return optimal_; }
@@ -46,10 +49,11 @@ namespace mongo {
requested sort order */
bool unhelpful() const { return unhelpful_; }
int direction() const { return direction_; }
- auto_ptr< Cursor > newCursor( const DiskLoc &startLoc = DiskLoc() ) const;
+ auto_ptr< Cursor > newCursor( const DiskLoc &startLoc = DiskLoc() , int numWanted=0 ) const;
auto_ptr< Cursor > newReverseCursor() const;
BSONObj indexKey() const;
const char *ns() const { return fbs_.ns(); }
+ NamespaceDetails *nsd() const { return d; }
BSONObj query() const { return fbs_.query(); }
BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const { return fbs_.simplifiedQuery( fields ); }
const FieldRange &range( const char *fieldName ) const { return fbs_.range( fieldName ); }
@@ -69,6 +73,8 @@ namespace mongo {
BoundList indexBounds_;
bool endKeyInclusive_;
bool unhelpful_;
+ string _special;
+ IndexType * _type;
};
// Inherit from this interface to implement a new query operation.
@@ -78,11 +84,15 @@ namespace mongo {
public:
QueryOp() : complete_(), qp_(), error_() {}
virtual ~QueryOp() {}
+
+ /** this gets called after a query plan is set? ERH 2/16/10 */
virtual void init() = 0;
virtual void next() = 0;
virtual bool mayRecordPlan() const = 0;
- // Return a copy of the inheriting class, which will be run with its own
- // query plan.
+
+ /** @return a copy of the inheriting class, which will be run with its own
+ query plan.
+ */
virtual QueryOp *clone() const = 0;
bool complete() const { return complete_; }
bool error() const { return error_; }
@@ -143,6 +153,7 @@ namespace mongo {
static void nextOp( QueryOp &op );
};
const char *ns;
+ BSONObj query_;
FieldRangeSet fbs_;
PlanSet plans_;
bool mayRecordPlan_;
@@ -153,9 +164,17 @@ namespace mongo {
bool honorRecordedPlan_;
BSONObj min_;
BSONObj max_;
+ string _special;
};
// NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern );
+
+ inline bool isSimpleIdQuery( const BSONObj& query ){
+ return
+ strcmp( query.firstElement().fieldName() , "_id" ) == 0 &&
+ query.nFields() == 1 &&
+ query.firstElement().isSimpleType();
+ }
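// [editor's examples]
//   { _id : ObjectId("...") } -> true (one field, simple type; takes the idhack path)
//   { _id : 5 , a : 1 } -> false (nFields() == 2)
//   { _id : { $gt : 5 } } -> false (subobject is not a simple type)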
} // namespace mongo
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index d8854be..c01b89e 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -24,96 +24,118 @@
#include "../util/unittest.h"
namespace mongo {
- namespace {
- /** returns a string that when used as a matcher, would match a super set of regex()
- returns "" for complex regular expressions
- used to optimize queries in some simple regex cases that start with '^'
- */
- inline string simpleRegexHelper(const char* regex, const char* flags){
- string r = "";
-
- bool extended = false;
- while (*flags){
- switch (*(flags++)){
- case 'm': // multiline
- continue;
- case 'x': // extended
- extended = true;
- break;
- default:
- return r; // cant use index
- }
- }
+ /** returns a string that, when used as a matcher, would match a superset of regex()
+ returns "" for complex regular expressions
+ used to optimize queries in some simple regex cases that start with '^'
- if ( *(regex++) != '^' )
- return r;
+ if purePrefix != NULL, sets it to whether the regex can be converted to a range query
+ */
+ string simpleRegex(const char* regex, const char* flags, bool* purePrefix){
+ string r = "";
- stringstream ss;
+ if (purePrefix) *purePrefix = false;
- while(*regex){
- char c = *(regex++);
- if ( c == '*' || c == '?' ){
- // These are the only two symbols that make the last char optional
- r = ss.str();
- r = r.substr( 0 , r.size() - 1 );
- return r; //breaking here fails with /^a?/
- } else if (c == '\\'){
- // slash followed by non-alphanumeric represents the following char
- c = *(regex++);
- if ((c >= 'A' && c <= 'Z') ||
- (c >= 'a' && c <= 'z') ||
- (c >= '0' && c <= '0') ||
- (c == '\0'))
- {
- r = ss.str();
- break;
- } else {
- ss << c;
- }
- } else if (strchr("^$.[|()+{", c)){
- // list of "metacharacters" from man pcrepattern
- r = ss.str();
+ bool extended = false;
+ while (*flags){
+ switch (*(flags++)){
+ case 'm': // multiline
+ continue;
+ case 'x': // extended
+ extended = true;
break;
- } else if (extended && c == '#'){
- // comment
+ default:
+ return r; // can't use index
+ }
+ }
+
+ if ( *(regex++) != '^' )
+ return r;
+
+ stringstream ss;
+
+ while(*regex){
+ char c = *(regex++);
+ if ( c == '*' || c == '?' ){
+ // These are the only two symbols that make the last char optional
+ r = ss.str();
+ r = r.substr( 0 , r.size() - 1 );
+ return r; //breaking here fails with /^a?/
+ } else if (c == '\\'){
+ // backslash followed by a non-alphanumeric char matches that char literally
+ c = *(regex++);
+ if ((c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c == '\0'))
+ {
r = ss.str();
break;
- } else if (extended && isspace(c)){
- continue;
} else {
- // self-matching char
ss << c;
}
- }
-
- if ( r.size() == 0 && *regex == 0 )
+ } else if (strchr("^$.[|()+{", c)){
+ // list of "metacharacters" from man pcrepattern
r = ss.str();
+ break;
+ } else if (extended && c == '#'){
+ // comment
+ r = ss.str();
+ break;
+ } else if (extended && isspace(c)){
+ continue;
+ } else {
+ // self-matching char
+ ss << c;
+ }
+ }
- return r;
+ if ( r.empty() && *regex == 0 ){
+ r = ss.str();
+ if (purePrefix) *purePrefix = !r.empty();
}
- inline string simpleRegex(const BSONElement& e){
- switch(e.type()){
- case RegEx:
- return simpleRegexHelper(e.regex(), e.regexFlags());
- case Object:{
- BSONObj o = e.embeddedObject();
- return simpleRegexHelper(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
- }
- default: assert(false); return ""; //return squashes compiler warning
+
+ return r;
+ }
+ inline string simpleRegex(const BSONElement& e){
+ switch(e.type()){
+ case RegEx:
+ return simpleRegex(e.regex(), e.regexFlags());
+ case Object:{
+ BSONObj o = e.embeddedObject();
+ return simpleRegex(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
}
+ default: assert(false); return ""; //return squashes compiler warning
}
}
+
+ string simpleRegexEnd( string regex ) {
+ ++regex[ regex.length() - 1 ];
+ return regex;
+ }
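// [editor's example] How a /^prefix/ regex becomes a btree range: bump the last
// byte of the prefix for an exclusive upper bound.
//
//     string lower = simpleRegex( "^abc" , "" ); // "abc" -- anchored, all literal chars
//     string upper = simpleRegexEnd( lower );    // "abd"
//     // FieldRange then scans the half-open range [ "abc" , "abd" ).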
- FieldRange::FieldRange( const BSONElement &e, bool optimize ) {
- if ( !e.eoo() && e.type() != RegEx && e.getGtLtOp() == BSONObj::opIN ) {
+
+ FieldRange::FieldRange( const BSONElement &e, bool isNot, bool optimize ) {
+ // NOTE with $not, we could potentially form a complementary set of intervals.
+ if ( !isNot && !e.eoo() && e.type() != RegEx && e.getGtLtOp() == BSONObj::opIN ) {
set< BSONElement, element_lt > vals;
+ vector< FieldRange > regexes;
+ uassert( 12580 , "invalid query" , e.isABSONObj() );
BSONObjIterator i( e.embeddedObject() );
- while( i.more() )
- vals.insert( i.next() );
+ while( i.more() ) {
+ BSONElement ie = i.next();
+ if ( ie.type() == RegEx ) {
+ regexes.push_back( FieldRange( ie, false, optimize ) );
+ } else {
+ vals.insert( ie );
+ }
+ }
for( set< BSONElement, element_lt >::const_iterator i = vals.begin(); i != vals.end(); ++i )
intervals_.push_back( FieldInterval(*i) );
+ for( vector< FieldRange >::const_iterator i = regexes.begin(); i != regexes.end(); ++i )
+ *this |= *i;
+
return;
}
@@ -149,15 +171,66 @@ namespace mongo {
|| (e.type() == Object && !e.embeddedObject()["$regex"].eoo())
)
{
- const string r = simpleRegex(e);
- if ( r.size() ) {
- lower = addObj( BSON( "" << r ) ).firstElement();
- upper = addObj( BSON( "" << simpleRegexEnd( r ) ) ).firstElement();
- upperInclusive = false;
- }
+ if ( !isNot ) { // no optimization for negated regex - we could consider creating 2 intervals comprising all nonmatching prefixes
+ const string r = simpleRegex(e);
+ if ( r.size() ) {
+ lower = addObj( BSON( "" << r ) ).firstElement();
+ upper = addObj( BSON( "" << simpleRegexEnd( r ) ) ).firstElement();
+ upperInclusive = false;
+ } else {
+ BSONObjBuilder b1(32), b2(32);
+ b1.appendMinForType( "" , String );
+ lower = addObj( b1.obj() ).firstElement();
+
+ b2.appendMaxForType( "" , String );
+ upper = addObj( b2.obj() ).firstElement();
+ upperInclusive = false; //MaxForType String is an empty Object
+ }
+
+                // a regex value matches itself - the regex BSON type sorts after the string type
+ if (e.type() == RegEx){
+ BSONElement re = addObj( BSON( "" << e ) ).firstElement();
+ intervals_.push_back( FieldInterval(re) );
+ } else {
+ BSONObj orig = e.embeddedObject();
+ BSONObjBuilder b;
+ b.appendRegex("", orig["$regex"].valuestrsafe(), orig["$options"].valuestrsafe());
+ BSONElement re = addObj( b.obj() ).firstElement();
+ intervals_.push_back( FieldInterval(re) );
+ }
+
+ }
return;
}
- switch( e.getGtLtOp() ) {
+ int op = e.getGtLtOp();
+ if ( isNot ) {
+ switch( op ) {
+ case BSONObj::Equality:
+ case BSONObj::opALL:
+ case BSONObj::opMOD: // NOTE for mod and type, we could consider having 1-2 intervals comprising the complementary types (multiple intervals already possible with $in)
+ case BSONObj::opTYPE:
+ op = BSONObj::NE; // no bound calculation
+ break;
+ case BSONObj::NE:
+ op = BSONObj::Equality;
+ break;
+ case BSONObj::LT:
+ op = BSONObj::GTE;
+ break;
+ case BSONObj::LTE:
+ op = BSONObj::GT;
+ break;
+ case BSONObj::GT:
+ op = BSONObj::LTE;
+ break;
+ case BSONObj::GTE:
+ op = BSONObj::LT;
+ break;
+ default: // otherwise doesn't matter
+ break;
+ }
+ }
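+        // e.g. { $not: { $lt: 5 } } is bounded as $gte 5, while { $not: { $in: [...] } } degrades to $ne (no bounds)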
+ switch( op ) {
case BSONObj::Equality:
lower = upper = e;
break;
@@ -174,8 +247,32 @@ namespace mongo {
case BSONObj::opALL: {
massert( 10370 , "$all requires array", e.type() == Array );
BSONObjIterator i( e.embeddedObject() );
- if ( i.more() )
- lower = upper = i.next();
+ bool bound = false;
+ while ( i.more() ){
+ BSONElement x = i.next();
+ if ( x.type() == Object && x.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ){
+ // taken care of elsewhere
+ }
+ else if ( x.type() != RegEx ) {
+ lower = upper = x;
+ bound = true;
+ break;
+ }
+ }
+                if ( !bound ) { // if no good non-regex bound was found, try regex bounds
+ BSONObjIterator i( e.embeddedObject() );
+ while( i.more() ) {
+ BSONElement x = i.next();
+ if ( x.type() != RegEx )
+ continue;
+ string simple = simpleRegex( x.regex(), x.regexFlags() );
+ if ( !simple.empty() ) {
+ lower = addObj( BSON( "" << simple ) ).firstElement();
+ upper = addObj( BSON( "" << simpleRegexEnd( simple ) ) ).firstElement();
+ break;
+ }
+ }
+ }
break;
}
case BSONObj::opMOD: {
@@ -206,10 +303,18 @@ namespace mongo {
break;
}
+ case BSONObj::opREGEX:
+ case BSONObj::opOPTIONS:
+ // do nothing
+ break;
case BSONObj::opELEM_MATCH: {
log() << "warning: shouldn't get here?" << endl;
break;
}
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ _special = "2d";
+ break;
default:
break;
}
@@ -269,19 +374,118 @@ namespace mongo {
intervals_ = newIntervals;
for( vector< BSONObj >::const_iterator i = other.objData_.begin(); i != other.objData_.end(); ++i )
objData_.push_back( *i );
+ if ( _special.size() == 0 && other._special.size() )
+ _special = other._special;
return *this;
}
- string FieldRange::simpleRegexEnd( string regex ) {
- ++regex[ regex.length() - 1 ];
- return regex;
- }
+ void handleInterval( const FieldInterval &lower, FieldBound &low, FieldBound &high, vector< FieldInterval > &newIntervals ) {
+ if ( low.bound_.eoo() ) {
+ low = lower.lower_; high = lower.upper_;
+ } else {
+            if ( high.bound_.woCompare( lower.lower_.bound_, false ) < 0 ) { // when equal but neither inclusive, just assume they overlap, since the current btree scanning code is just as efficient either way
+ FieldInterval tmp;
+ tmp.lower_ = low;
+ tmp.upper_ = high;
+ newIntervals.push_back( tmp );
+ low = lower.lower_; high = lower.upper_;
+ } else {
+ high = lower.upper_;
+ }
+ }
+ }
+
+ const FieldRange &FieldRange::operator|=( const FieldRange &other ) {
+ vector< FieldInterval > newIntervals;
+ FieldBound low;
+ FieldBound high;
+ vector< FieldInterval >::const_iterator i = intervals_.begin();
+ vector< FieldInterval >::const_iterator j = other.intervals_.begin();
+ while( i != intervals_.end() && j != other.intervals_.end() ) {
+ int cmp = i->lower_.bound_.woCompare( j->lower_.bound_, false );
+ if ( ( cmp == 0 && i->lower_.inclusive_ ) || cmp < 0 ) {
+ handleInterval( *i, low, high, newIntervals );
+ ++i;
+ } else {
+ handleInterval( *j, low, high, newIntervals );
+ ++j;
+ }
+ }
+ while( i != intervals_.end() ) {
+ handleInterval( *i, low, high, newIntervals );
+ ++i;
+ }
+ while( j != other.intervals_.end() ) {
+ handleInterval( *j, low, high, newIntervals );
+ ++j;
+ }
+ FieldInterval tmp;
+ tmp.lower_ = low;
+ tmp.upper_ = high;
+ newIntervals.push_back( tmp );
+ intervals_ = newIntervals;
+ for( vector< BSONObj >::const_iterator i = other.objData_.begin(); i != other.objData_.end(); ++i )
+ objData_.push_back( *i );
+ if ( _special.size() == 0 && other._special.size() )
+ _special = other._special;
+ return *this;
+ }
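+    // e.g. uniting intervals [1,3) and [2,5) produces the single interval [1,5);
+    // disjoint intervals such as [1,2) and [4,5) are kept separate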
BSONObj FieldRange::addObj( const BSONObj &o ) {
objData_.push_back( o );
return o;
}
+ string FieldRangeSet::getSpecial() const {
+ string s = "";
+ for ( map<string,FieldRange>::iterator i=ranges_.begin(); i!=ranges_.end(); i++ ){
+ if ( i->second.getSpecial().size() == 0 )
+ continue;
+ uassert( 13033 , "can't have 2 special fields" , s.size() == 0 );
+ s = i->second.getSpecial();
+ }
+ return s;
+ }
+
+ void FieldRangeSet::processOpElement( const char *fieldName, const BSONElement &f, bool isNot, bool optimize ) {
+ BSONElement g = f;
+ int op2 = g.getGtLtOp();
+ if ( op2 == BSONObj::opALL ) {
+ BSONElement h = g;
+ massert( 13050 , "$all requires array", h.type() == Array );
+ BSONObjIterator i( h.embeddedObject() );
+ if( i.more() ) {
+ BSONElement x = i.next();
+ if ( x.type() == Object && x.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
+ g = x.embeddedObject().firstElement();
+ op2 = g.getGtLtOp();
+ }
+ }
+ }
+ if ( op2 == BSONObj::opELEM_MATCH ) {
+ BSONObjIterator k( g.embeddedObjectUserCheck() );
+ while ( k.more() ){
+ BSONElement h = k.next();
+ StringBuilder buf(32);
+ buf << fieldName << "." << h.fieldName();
+ string fullname = buf.str();
+
+ int op3 = getGtLtOp( h );
+ if ( op3 == BSONObj::Equality ){
+ ranges_[ fullname ] &= FieldRange( h , isNot , optimize );
+ }
+ else {
+ BSONObjIterator l( h.embeddedObject() );
+ while ( l.more() ){
+ ranges_[ fullname ] &= FieldRange( l.next() , isNot , optimize );
+ }
+ }
+ }
+ } else {
+ ranges_[ fieldName ] &= FieldRange( f , isNot , optimize );
+ }
+ }
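+    // e.g. { a: { $elemMatch: { b: 1, c: { $gt: 2 } } } } constrains the ranges of "a.b" and "a.c"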
+
FieldRangeSet::FieldRangeSet( const char *ns, const BSONObj &query , bool optimize )
: ns_( ns ), query_( query.getOwned() ) {
BSONObjIterator i( query_ );
@@ -293,36 +497,38 @@ namespace mongo {
if ( strcmp( e.fieldName(), "$where" ) == 0 )
continue;
- int op = getGtLtOp( e );
+ bool equality = ( getGtLtOp( e ) == BSONObj::Equality );
+ if ( equality && e.type() == Object ) {
+ equality = ( strcmp( e.embeddedObject().firstElement().fieldName(), "$not" ) != 0 );
+ }
- if ( op == BSONObj::Equality || op == BSONObj::opREGEX || op == BSONObj::opOPTIONS ) {
- ranges_[ e.fieldName() ] &= FieldRange( e , optimize );
- }
- else if ( op == BSONObj::opELEM_MATCH ){
- BSONObjIterator i( e.embeddedObjectUserCheck().firstElement().embeddedObjectUserCheck() );
- while ( i.more() ){
- BSONElement f = i.next();
- StringBuilder buf(32);
- buf << e.fieldName() << "." << f.fieldName();
- string fullname = buf.str();
-
- int op2 = getGtLtOp( f );
- if ( op2 == BSONObj::Equality ){
- ranges_[ fullname ] &= FieldRange( f , optimize );
- }
- else {
- BSONObjIterator j( f.embeddedObject() );
- while ( j.more() ){
- ranges_[ fullname ] &= FieldRange( j.next() , optimize );
+ if ( equality || ( e.type() == Object && !e.embeddedObject()[ "$regex" ].eoo() ) ) {
+ ranges_[ e.fieldName() ] &= FieldRange( e , false , optimize );
+ }
+ if ( !equality ) {
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ BSONElement f = j.next();
+ if ( strcmp( f.fieldName(), "$not" ) == 0 ) {
+ switch( f.type() ) {
+ case Object: {
+ BSONObjIterator k( f.embeddedObject() );
+ while( k.more() ) {
+ BSONElement g = k.next();
+ uassert( 13034, "invalid use of $not", g.getGtLtOp() != BSONObj::Equality );
+ processOpElement( e.fieldName(), g, true, optimize );
+ }
+ break;
+ }
+ case RegEx:
+ processOpElement( e.fieldName(), f, true, optimize );
+ break;
+ default:
+ uassert( 13041, "invalid use of $not", false );
}
+ } else {
+ processOpElement( e.fieldName(), f, false, optimize );
}
- }
- }
- else {
- BSONObjIterator i( e.embeddedObject() );
- while( i.more() ) {
- BSONElement f = i.next();
- ranges_[ e.fieldName() ] &= FieldRange( f , optimize );
}
}
}
@@ -445,8 +651,8 @@ namespace mongo {
///////////////////
void FieldMatcher::add( const BSONObj& o ){
- massert( 10371 , "can only add to FieldMatcher once", source_.isEmpty());
- source_ = o;
+ massert( 10371 , "can only add to FieldMatcher once", _source.isEmpty());
+ _source = o;
BSONObjIterator i( o );
int true_false = -1;
@@ -457,23 +663,24 @@ namespace mongo {
// validate input
if (true_false == -1){
true_false = e.trueValue();
- include_ = !e.trueValue();
- }else{
- if((bool) true_false != e.trueValue())
- errmsg = "You cannot currently mix including and excluding fields. Contact us if this is an issue.";
+ _include = !e.trueValue();
+ }
+ else{
+ uassert( 10053 , "You cannot currently mix including and excluding fields. Contact us if this is an issue." ,
+ (bool)true_false == e.trueValue() );
}
}
}
void FieldMatcher::add(const string& field, bool include){
if (field.empty()){ // this is the field the user referred to
- include_ = include;
+ _include = include;
} else {
const size_t dot = field.find('.');
const string subfield = field.substr(0,dot);
const string rest = (dot == string::npos ? "" : field.substr(dot+1,string::npos));
- boost::shared_ptr<FieldMatcher>& fm = fields_[subfield];
+ boost::shared_ptr<FieldMatcher>& fm = _fields[subfield];
if (!fm)
fm.reset(new FieldMatcher(!include));
@@ -482,7 +689,7 @@ namespace mongo {
}
BSONObj FieldMatcher::getSpec() const{
- return source_;
+ return _source;
}
//b will be the value part of an array-typed BSONElement
@@ -509,7 +716,7 @@ namespace mongo {
break;
}
default:
- if (include_)
+ if (_include)
b.appendAs(e, b.numStr(i++).c_str());
}
@@ -518,18 +725,20 @@ namespace mongo {
}
void FieldMatcher::append( BSONObjBuilder& b , const BSONElement& e ) const {
- FieldMap::const_iterator field = fields_.find( e.fieldName() );
+ FieldMap::const_iterator field = _fields.find( e.fieldName() );
- if (field == fields_.end()){
- if (include_)
+ if (field == _fields.end()){
+ if (_include)
b.append(e);
- } else {
+ }
+ else {
FieldMatcher& subfm = *field->second;
-
- if (subfm.fields_.empty() || !(e.type()==Object || e.type()==Array) ){
- if (subfm.include_)
+
+ if (subfm._fields.empty() || !(e.type()==Object || e.type()==Array) ){
+ if (subfm._include)
b.append(e);
- } else if (e.type() == Object){
+ }
+ else if (e.type() == Object){
BSONObjBuilder subb;
BSONObjIterator it(e.embeddedObject());
while (it.more()){
@@ -537,7 +746,8 @@ namespace mongo {
}
b.append(e.fieldName(), subb.obj());
- } else { //Array
+ }
+ else { //Array
BSONObjBuilder subb;
subfm.appendArray(subb, e.embeddedObject());
b.appendArray(e.fieldName(), subb.obj());
diff --git a/db/queryutil.h b/db/queryutil.h
index 2122a7f..7d8be78 100644
--- a/db/queryutil.h
+++ b/db/queryutil.h
@@ -48,8 +48,9 @@ namespace mongo {
// determine index limits
class FieldRange {
public:
- FieldRange( const BSONElement &e = BSONObj().firstElement() , bool optimize=true );
+ FieldRange( const BSONElement &e = BSONObj().firstElement() , bool isNot=false , bool optimize=true );
const FieldRange &operator&=( const FieldRange &other );
+ const FieldRange &operator|=( const FieldRange &other );
BSONElement min() const { assert( !empty() ); return intervals_[ 0 ].lower_.bound_; }
BSONElement max() const { assert( !empty() ); return intervals_[ intervals_.size() - 1 ].upper_.bound_; }
bool minInclusive() const { assert( !empty() ); return intervals_[ 0 ].lower_.inclusive_; }
@@ -69,11 +70,13 @@ namespace mongo {
}
bool empty() const { return intervals_.empty(); }
const vector< FieldInterval > &intervals() const { return intervals_; }
+ string getSpecial() const { return _special; }
+
private:
BSONObj addObj( const BSONObj &o );
- string simpleRegexEnd( string regex );
vector< FieldInterval > intervals_;
vector< BSONObj > objData_;
+ string _special;
};
// implements query pattern matching, used to determine if a query is
@@ -171,7 +174,9 @@ namespace mongo {
}
QueryPattern pattern( const BSONObj &sort = BSONObj() ) const;
BoundList indexBounds( const BSONObj &keyPattern, int direction ) const;
+ string getSpecial() const;
private:
+ void processOpElement( const char *fieldName, const BSONElement &f, bool isNot, bool optimize );
static FieldRange *trivialRange_;
static FieldRange &trivialRange();
mutable map< string, FieldRange > ranges_;
@@ -185,26 +190,34 @@ namespace mongo {
class FieldMatcher {
public:
- FieldMatcher(bool include=false) : errmsg(NULL), include_(include) {}
+ FieldMatcher(bool include=false) : _include(include){}
void add( const BSONObj& o );
void append( BSONObjBuilder& b , const BSONElement& e ) const;
BSONObj getSpec() const;
-
- const char* errmsg; //null if FieldMatcher is valid
private:
void add( const string& field, bool include );
void appendArray( BSONObjBuilder& b , const BSONObj& a ) const;
- bool include_; // true if default at this level is to include
+ bool _include; // true if default at this level is to include
//TODO: benchmark vector<pair> vs map
typedef map<string, boost::shared_ptr<FieldMatcher> > FieldMap;
- FieldMap fields_;
- BSONObj source_;
+ FieldMap _fields;
+ BSONObj _source;
};
+    /** returns a string that, when used as a matcher, would match a superset of regex()
+ returns "" for complex regular expressions
+ used to optimize queries in some simple regex cases that start with '^'
+
+ if purePrefix != NULL, sets it to whether the regex can be converted to a range query
+ */
+ string simpleRegex(const char* regex, const char* flags, bool* purePrefix=NULL);
+
+    /** returns the exclusive upper bound of the set of strings that begin with prefix */
+ string simpleRegexEnd( string prefix );
} // namespace mongo
diff --git a/db/rec.h b/db/rec.h
index b749dd8..ee75669 100644
--- a/db/rec.h
+++ b/db/rec.h
@@ -1,4 +1,20 @@
// rec.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
/* TODO for _RECSTORE
diff --git a/db/reccache.cpp b/db/reccache.cpp
index 66dd4e3..6e1f3de 100644
--- a/db/reccache.cpp
+++ b/db/reccache.cpp
@@ -1,134 +1,150 @@
-// storage.cpp
-
-#include "stdafx.h"
-#include "pdfile.h"
-#include "reccache.h"
-#include "rec.h"
-#include "db.h"
-
-namespace mongo {
-
-RecCache theRecCache(BucketSize);
-
-// 100k * 8KB = 800MB
-unsigned RecCache::MAXNODES = 50000;
-
-void setRecCacheSize(unsigned mb) {
- unsigned long long MB = mb;
- log(2) << "reccache size: " << MB << "MB\n";
- uassert( 10114 , "bad cache size", MB > 0 && MB < 1000000 );
- RecCache::MAXNODES = (unsigned) MB * 1024 * 1024 / 8192;
- log(3) << "RecCache::MAXNODES=" << RecCache::MAXNODES << '\n';
-}
-
-void writerThread() {
- sleepsecs(10);
- while( 1 ) {
- try {
- theRecCache.writeLazily();
- }
- catch(...) {
- log() << "exception in writerThread()" << endl;
- sleepsecs(3);
- }
- }
-}
-
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// storage.cpp
+
+#include "stdafx.h"
+#include "pdfile.h"
+#include "reccache.h"
+#include "rec.h"
+#include "db.h"
+
+namespace mongo {
+
+RecCache theRecCache(BucketSize);
+
+// 100k * 8KB = 800MB
+unsigned RecCache::MAXNODES = 50000;
+
+void setRecCacheSize(unsigned mb) {
+ unsigned long long MB = mb;
+ log(2) << "reccache size: " << MB << "MB\n";
+ uassert( 10114 , "bad cache size", MB > 0 && MB < 1000000 );
+ RecCache::MAXNODES = (unsigned) MB * 1024 * 1024 / 8192;
+ log(3) << "RecCache::MAXNODES=" << RecCache::MAXNODES << '\n';
+}
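+// e.g. setRecCacheSize(800) gives MAXNODES = 800*1024*1024/8192 = 102400 cached 8KB nodes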
+
+void writerThread() {
+ sleepsecs(10);
+ while( 1 ) {
+ try {
+ theRecCache.writeLazily();
+ }
+ catch(...) {
+ log() << "exception in writerThread()" << endl;
+ sleepsecs(3);
+ }
+ }
+}
+
// called on program exit.
-void recCacheCloseAll() {
-#if defined(_RECSTORE)
- theRecCache.closing();
-#endif
-}
-
-int ndirtywritten;
-
-inline static string escape(const char *ns) {
- char buf[256];
- char *p = buf;
- while( 1 ) {
- if( *ns == '$' ) *p = '~';
- else
- *p = *ns;
- if( *ns == 0 )
- break;
- p++; ns++;
- }
- assert( p - buf < (int) sizeof(buf) );
- return buf;
-}
-
-inline static string unescape(const char *ns) {
- char buf[256];
- char *p = buf;
- while( 1 ) {
- if( *ns == '~' ) *p = '$';
- else
- *p = *ns;
- if( *ns == 0 )
- break;
- p++; ns++;
- }
- assert( p - buf < (int) sizeof(buf) );
- return buf;
-}
-
-string RecCache::directory() {
- return cc().database()->path;
-}
-
-/* filename format is
-
- <n>-<ns>.idx
-*/
-
-BasicRecStore* RecCache::_initStore(string fname) {
-
- assert( strchr(fname.c_str(), '/') == 0 );
- assert( strchr(fname.c_str(), '\\') == 0 );
-
- stringstream ss(fname);
- int n;
- ss >> n;
- assert( n >= 0 );
- char ch;
- ss >> ch;
- assert( ch == '-' );
- string rest;
- ss >> rest;
- const char *p = rest.c_str();
- const char *q = strstr(p, ".idx");
- assert( q );
- string escaped_ns(p, q-p);
-
- // arbitrary limit. if you are hitting, we should use fewer files and put multiple
- // indexes in a single file (which is easy to do)
- massert( 10374 , "too many index files", n < 10000 );
-
- if( stores.size() < (unsigned)n+1 )
- stores.resize(n+1);
- assert( stores[n] == 0 );
- BasicRecStore *rs = new BasicRecStore(n);
- path pf(directory());
- pf /= fname;
- string full = pf.string();
- rs->init(full.c_str(), recsize);
- stores[n] = rs;
- string ns = unescape(escaped_ns.c_str());
- storesByNsKey[mknskey(ns.c_str())] = rs;
- return rs;
-}
-
-BasicRecStore* RecCache::initStore(int n) {
- string ns;
- {
- stringstream ss;
- ss << '/' << n << '-';
- ns = ss.str();
- }
-
- /* this will be slow if there are thousands of files */
- path dir(directory());
+void recCacheCloseAll() {
+#if defined(_RECSTORE)
+ theRecCache.closing();
+#endif
+}
+
+int ndirtywritten;
+
+inline static string escape(const char *ns) {
+ char buf[256];
+ char *p = buf;
+ while( 1 ) {
+ if( *ns == '$' ) *p = '~';
+ else
+ *p = *ns;
+ if( *ns == 0 )
+ break;
+ p++; ns++;
+ }
+ assert( p - buf < (int) sizeof(buf) );
+ return buf;
+}
+
+inline static string unescape(const char *ns) {
+ char buf[256];
+ char *p = buf;
+ while( 1 ) {
+ if( *ns == '~' ) *p = '$';
+ else
+ *p = *ns;
+ if( *ns == 0 )
+ break;
+ p++; ns++;
+ }
+ assert( p - buf < (int) sizeof(buf) );
+ return buf;
+}
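+// e.g. escape("local.oplog.$main") yields "local.oplog.~main"; unescape maps '~' back to '$'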
+
+string RecCache::directory() {
+ return cc().database()->path;
+}
+
+/* filename format is
+
+ <n>-<ns>.idx
+*/
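+/* e.g. "3-local.oplog.~main.idx" is store number 3 for namespace "local.oplog.$main" */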
+
+BasicRecStore* RecCache::_initStore(string fname) {
+
+ assert( strchr(fname.c_str(), '/') == 0 );
+ assert( strchr(fname.c_str(), '\\') == 0 );
+
+ stringstream ss(fname);
+ int n;
+ ss >> n;
+ assert( n >= 0 );
+ char ch;
+ ss >> ch;
+ assert( ch == '-' );
+ string rest;
+ ss >> rest;
+ const char *p = rest.c_str();
+ const char *q = strstr(p, ".idx");
+ assert( q );
+ string escaped_ns(p, q-p);
+
+    // arbitrary limit. if you are hitting it, we should use fewer files and put multiple
+ // indexes in a single file (which is easy to do)
+ massert( 10374 , "too many index files", n < 10000 );
+
+ if( stores.size() < (unsigned)n+1 )
+ stores.resize(n+1);
+ assert( stores[n] == 0 );
+ BasicRecStore *rs = new BasicRecStore(n);
+ path pf(directory());
+ pf /= fname;
+ string full = pf.string();
+ rs->init(full.c_str(), recsize);
+ stores[n] = rs;
+ string ns = unescape(escaped_ns.c_str());
+ storesByNsKey[mknskey(ns.c_str())] = rs;
+ return rs;
+}
+
+BasicRecStore* RecCache::initStore(int n) {
+ string ns;
+ {
+ stringstream ss;
+ ss << '/' << n << '-';
+ ns = ss.str();
+ }
+
+ /* this will be slow if there are thousands of files */
+ path dir(directory());
directory_iterator end;
try {
directory_iterator i(dir);
@@ -152,27 +168,27 @@ BasicRecStore* RecCache::initStore(int n) {
}
stringstream ss;
ss << "index datafile missing? n=" << n;
- uasserted(12500,ss.str());
- return 0;
-}
-
-/* find the filename for a given ns.
- format is
- <n>-<escaped_ns>.idx
- returns filename. found is true if found. If false, a proposed name is returned for (optional) creation
- of the file.
-*/
-string RecCache::findStoreFilename(const char *_ns, bool& found) {
- string namefrag;
- {
- stringstream ss;
- ss << '-';
- ss << escape(_ns);
- ss << ".idx";
- namefrag = ss.str();
- }
-
- path dir(directory());
+ uasserted(12500,ss.str());
+ return 0;
+}
+
+/* find the filename for a given ns.
+ format is
+ <n>-<escaped_ns>.idx
+   returns the filename. 'found' is set true if the file exists; if false, a proposed
+   name is returned for (optional) creation of the file.
+*/
+string RecCache::findStoreFilename(const char *_ns, bool& found) {
+ string namefrag;
+ {
+ stringstream ss;
+ ss << '-';
+ ss << escape(_ns);
+ ss << ".idx";
+ namefrag = ss.str();
+ }
+
+ path dir(directory());
directory_iterator end;
int nmax = -1;
try {
@@ -204,198 +220,198 @@ string RecCache::findStoreFilename(const char *_ns, bool& found) {
ss << nmax+1 << namefrag;
found = false;
return ss.str();
-}
-
-void RecCache::initStoreByNs(const char *_ns, const string& nskey) {
- bool found;
- string fn = findStoreFilename(_ns, found);
+}
+
+void RecCache::initStoreByNs(const char *_ns, const string& nskey) {
+ bool found;
+ string fn = findStoreFilename(_ns, found);
_initStore(fn);
-}
-
-inline void RecCache::writeIfDirty(Node *n) {
- if( n->dirty ) {
- ndirtywritten++;
- n->dirty = false;
- store(n->loc).update(fileOfs(n->loc), n->data, recsize);
- }
-}
-
-void RecCache::closeFiles(string dbname, string path) {
- assertInWriteLock();
- boostlock lk(rcmutex);
-
- // first we write all dirty pages. it is not easy to check which Nodes are for a particular
- // db, so we just write them all.
- writeDirty( dirtyl.begin(), true );
-
- string key = path + dbname + '.';
- unsigned sz = key.size();
- for( map<string, BasicRecStore*>::iterator i = storesByNsKey.begin(); i != storesByNsKey.end(); i++ ) {
- map<string, BasicRecStore*>::iterator j = i;
- i++;
- if( strncmp(j->first.c_str(), key.c_str(), sz) == 0 ) {
- assert( stores[j->second->fileNumber] != 0 );
- stores[j->second->fileNumber] = 0;
- delete j->second;
- storesByNsKey.erase(j);
- }
- }
-}
-
-void RecCache::closing() {
- boostlock lk(rcmutex);
- (cout << "TEMP: recCacheCloseAll() writing dirty pages...\n").flush();
- writeDirty( dirtyl.begin(), true );
- for( unsigned i = 0; i < stores.size(); i++ ) {
- if( stores[i] ) {
- delete stores[i];
- }
- }
- (cout << "TEMP: write dirty done\n").flush();
-}
-
-/* note that this is written in order, as much as possible, given that dirtyl is of type set. */
-void RecCache::writeDirty( set<DiskLoc>::iterator startAt, bool rawLog ) {
- try {
- ndirtywritten=0;
- for( set<DiskLoc>::iterator i = startAt; i != dirtyl.end(); i++ ) {
- map<DiskLoc, Node*>::iterator j = m.find(*i);
- if( j != m.end() )
- writeIfDirty(j->second);
- }
- OCCASIONALLY out() << "TEMP: ndirtywritten: " << ndirtywritten << endl;
- }
- catch(...) {
+}
+
+inline void RecCache::writeIfDirty(Node *n) {
+ if( n->dirty ) {
+ ndirtywritten++;
+ n->dirty = false;
+ store(n->loc).update(fileOfs(n->loc), n->data, recsize);
+ }
+}
+
+void RecCache::closeFiles(string dbname, string path) {
+ assertInWriteLock();
+ scoped_lock lk(rcmutex);
+
+ // first we write all dirty pages. it is not easy to check which Nodes are for a particular
+ // db, so we just write them all.
+ writeDirty( dirtyl.begin(), true );
+
+ string key = path + dbname + '.';
+ unsigned sz = key.size();
+ for( map<string, BasicRecStore*>::iterator i = storesByNsKey.begin(); i != storesByNsKey.end(); i++ ) {
+ map<string, BasicRecStore*>::iterator j = i;
+ i++;
+ if( strncmp(j->first.c_str(), key.c_str(), sz) == 0 ) {
+ assert( stores[j->second->fileNumber] != 0 );
+ stores[j->second->fileNumber] = 0;
+ delete j->second;
+ storesByNsKey.erase(j);
+ }
+ }
+}
+
+void RecCache::closing() {
+ scoped_lock lk(rcmutex);
+ (cout << "TEMP: recCacheCloseAll() writing dirty pages...\n").flush();
+ writeDirty( dirtyl.begin(), true );
+ for( unsigned i = 0; i < stores.size(); i++ ) {
+ if( stores[i] ) {
+ delete stores[i];
+ }
+ }
+ (cout << "TEMP: write dirty done\n").flush();
+}
+
+/* note that this is written in order, as much as possible, given that dirtyl is of type set. */
+void RecCache::writeDirty( set<DiskLoc>::iterator startAt, bool rawLog ) {
+ try {
+ ndirtywritten=0;
+ for( set<DiskLoc>::iterator i = startAt; i != dirtyl.end(); i++ ) {
+ map<DiskLoc, Node*>::iterator j = m.find(*i);
+ if( j != m.end() )
+ writeIfDirty(j->second);
+ }
+ OCCASIONALLY out() << "TEMP: ndirtywritten: " << ndirtywritten << endl;
+ }
+ catch(...) {
const char *message = "Problem: bad() in RecCache::writeDirty, file io error\n";
if ( rawLog )
rawOut( message );
else
( log() << message ).flush();
- }
- dirtyl.clear();
-}
-
-void RecCache::writeLazily() {
- int sleep = 0;
- int k;
- {
- boostlock lk(rcmutex);
- Timer t;
- set<DiskLoc>::iterator i = dirtyl.end();
- for( k = 0; k < 100; k++ ) {
- if( i == dirtyl.begin() ) {
- // we're not very far behind
- sleep = k < 20 ? 2000 : 1000;
- break;
- }
- i--;
- }
- writeDirty(i);
- if( sleep == 0 ) {
- sleep = t.millis() * 4 + 10;
- }
- }
-
- OCCASIONALLY cout << "writeLazily " << k << " sleep:" << sleep << '\n';
- sleepmillis(sleep);
-}
-
-void RecCache::_ejectOld() {
- boostlock lk(rcmutex);
- if( nnodes <= MAXNODES )
- return;
- Node *n = oldest;
- while( 1 ) {
- if( nnodes <= MAXNODES - 4 ) {
- n->older = 0;
- oldest = n;
- assert( oldest ) ;
- break;
- }
- nnodes--;
- assert(n);
- Node *nxt = n->newer;
- writeIfDirty(n);
- m.erase(n->loc);
- delete n;
- n = nxt;
- }
-}
-
-void RecCache::dump() {
- Node *n = oldest;
- Node *last = 0;
- while( n ) {
- assert( n->older == last );
- last = n;
-// cout << n << ' ' << n->older << ' ' << n->newer << '\n';
- n=n->newer;
- }
- assert( newest == last );
-// cout << endl;
-}
-
-/* cleans up everything EXCEPT storesByNsKey.
- note this function is slow should not be invoked often
-*/
-void RecCache::closeStore(BasicRecStore *rs) {
- int n = rs->fileNumber + Base;
- for( set<DiskLoc>::iterator i = dirtyl.begin(); i != dirtyl.end(); ) {
- DiskLoc k = *i++;
- if( k.a() == n )
- dirtyl.erase(k);
- }
-
- for( map<DiskLoc,Node*>::iterator i = m.begin(); i != m.end(); ) {
- DiskLoc k = i->first;
- i++;
- if( k.a() == n )
- m.erase(k);
- }
-
- assert( stores[rs->fileNumber] != 0 );
- stores[rs->fileNumber] = 0;
-/*
- for( unsigned i = 0; i < stores.size(); i++ ) {
- if( stores[i] == rs ) {
- stores[i] = 0;
- break;
- }
- }*/
- delete rs; // closes file
-}
-
-void RecCache::drop(const char *_ns) {
- // todo: test with a non clean shutdown file
- boostlock lk(rcmutex);
-
- map<string, BasicRecStore*>::iterator it = storesByNsKey.find(mknskey(_ns));
- string fname;
- if( it != storesByNsKey.end() ) {
- fname = it->second->filename;
- closeStore(it->second); // cleans up stores[] etc.
- storesByNsKey.erase(it);
- }
- else {
- bool found;
- fname = findStoreFilename(_ns, found);
- if( !found ) {
- log() << "RecCache::drop: no idx file found for " << _ns << endl;
- return;
- }
- path pf(directory());
- pf /= fname;
- fname = pf.string();
- }
- try {
- if( !boost::filesystem::exists(fname) )
- log() << "RecCache::drop: can't find file to remove " << fname << endl;
- boost::filesystem::remove(fname);
- }
- catch(...) {
- log() << "RecCache::drop: exception removing file " << fname << endl;
- }
-}
-
-}
+ }
+ dirtyl.clear();
+}
+
+void RecCache::writeLazily() {
+ int sleep = 0;
+ int k;
+ {
+ scoped_lock lk(rcmutex);
+ Timer t;
+ set<DiskLoc>::iterator i = dirtyl.end();
+ for( k = 0; k < 100; k++ ) {
+ if( i == dirtyl.begin() ) {
+ // we're not very far behind
+ sleep = k < 20 ? 2000 : 1000;
+ break;
+ }
+ i--;
+ }
+ writeDirty(i);
+ if( sleep == 0 ) {
+ sleep = t.millis() * 4 + 10;
+ }
+ }
+
+ OCCASIONALLY cout << "writeLazily " << k << " sleep:" << sleep << '\n';
+ sleepmillis(sleep);
+}
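+// The lazy writer flushes at most the 100 most recently dirtied pages per pass; when nearly
+// caught up it sleeps 1-2 seconds, otherwise 4x the flush time plus 10ms.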
+
+void RecCache::_ejectOld() {
+ scoped_lock lk(rcmutex);
+ if( nnodes <= MAXNODES )
+ return;
+ Node *n = oldest;
+ while( 1 ) {
+ if( nnodes <= MAXNODES - 4 ) {
+ n->older = 0;
+ oldest = n;
+ assert( oldest ) ;
+ break;
+ }
+ nnodes--;
+ assert(n);
+ Node *nxt = n->newer;
+ writeIfDirty(n);
+ m.erase(n->loc);
+ delete n;
+ n = nxt;
+ }
+}
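+// LRU eviction: once the cache exceeds MAXNODES, the oldest nodes are flushed if dirty and
+// freed until the count drops to MAXNODES - 4.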
+
+void RecCache::dump() {
+ Node *n = oldest;
+ Node *last = 0;
+ while( n ) {
+ assert( n->older == last );
+ last = n;
+// cout << n << ' ' << n->older << ' ' << n->newer << '\n';
+ n=n->newer;
+ }
+ assert( newest == last );
+// cout << endl;
+}
+
+/* cleans up everything EXCEPT storesByNsKey.
+   note this function is slow and should not be invoked often
+*/
+void RecCache::closeStore(BasicRecStore *rs) {
+ int n = rs->fileNumber + Base;
+ for( set<DiskLoc>::iterator i = dirtyl.begin(); i != dirtyl.end(); ) {
+ DiskLoc k = *i++;
+ if( k.a() == n )
+ dirtyl.erase(k);
+ }
+
+ for( map<DiskLoc,Node*>::iterator i = m.begin(); i != m.end(); ) {
+ DiskLoc k = i->first;
+ i++;
+ if( k.a() == n )
+ m.erase(k);
+ }
+
+ assert( stores[rs->fileNumber] != 0 );
+ stores[rs->fileNumber] = 0;
+/*
+ for( unsigned i = 0; i < stores.size(); i++ ) {
+ if( stores[i] == rs ) {
+ stores[i] = 0;
+ break;
+ }
+ }*/
+ delete rs; // closes file
+}
+
+void RecCache::drop(const char *_ns) {
+ // todo: test with a non clean shutdown file
+ scoped_lock lk(rcmutex);
+
+ map<string, BasicRecStore*>::iterator it = storesByNsKey.find(mknskey(_ns));
+ string fname;
+ if( it != storesByNsKey.end() ) {
+ fname = it->second->filename;
+ closeStore(it->second); // cleans up stores[] etc.
+ storesByNsKey.erase(it);
+ }
+ else {
+ bool found;
+ fname = findStoreFilename(_ns, found);
+ if( !found ) {
+ log() << "RecCache::drop: no idx file found for " << _ns << endl;
+ return;
+ }
+ path pf(directory());
+ pf /= fname;
+ fname = pf.string();
+ }
+ try {
+ if( !boost::filesystem::exists(fname) )
+ log() << "RecCache::drop: can't find file to remove " << fname << endl;
+ boost::filesystem::remove(fname);
+ }
+ catch(...) {
+ log() << "RecCache::drop: exception removing file " << fname << endl;
+ }
+}
+
+}
diff --git a/db/reccache.h b/db/reccache.h
index 42943c5..d354587 100644
--- a/db/reccache.h
+++ b/db/reccache.h
@@ -1,4 +1,20 @@
// reccache.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
/* CachedBasicRecStore
This is our store which implements a traditional page-cache type of storage
@@ -33,7 +49,7 @@ class RecCache {
bool dirty;
Node *older, *newer; // lru
};
- boost::mutex &rcmutex; // mainly to coordinate with the lazy writer thread
+ mongo::mutex rcmutex; // mainly to coordinate with the lazy writer thread
unsigned recsize;
map<DiskLoc, Node*> m; // the cache
Node *newest, *oldest;
@@ -118,7 +134,7 @@ private:
public:
/* all public functions (except constructor) should use the mutex */
- RecCache(unsigned recsz) : rcmutex( *( new boost::mutex() ) ), recsize(recsz) {
+ RecCache(unsigned recsz) : recsize(recsz) {
nnodes = 0;
newest = oldest = 0;
}
@@ -140,7 +156,7 @@ public:
*/
void dirty(DiskLoc d) {
assert( d.a() >= Base );
- boostlock lk(rcmutex);
+ scoped_lock lk(rcmutex);
map<DiskLoc, Node*>::iterator i = m.find(d);
if( i != m.end() ) {
Node *n = i->second;
@@ -155,7 +171,7 @@ public:
assert( d.a() >= Base );
assert( len == recsize );
- boostlock lk(rcmutex);
+ scoped_lock lk(rcmutex);
map<DiskLoc, Node*>::iterator i = m.find(d);
if( i != m.end() ) {
touch(i->second);
@@ -172,7 +188,7 @@ public:
void drop(const char *ns);
DiskLoc insert(const char *ns, const void *obuf, int len, bool god) {
- boostlock lk(rcmutex);
+ scoped_lock lk(rcmutex);
BasicRecStore& rs = store(ns);
fileofs o = rs.insert((const char *) obuf, len);
assert( o % recsize == 0 );
@@ -229,9 +245,11 @@ public:
*/
inline void dbunlocking_read() {
+ /*
Client *c = currentClient.get();
if ( c )
c->top.clientStop();
+ */
}
inline void dbunlocking_write() {
diff --git a/db/reci.h b/db/reci.h
index 295388c..08dcece 100644
--- a/db/reci.h
+++ b/db/reci.h
@@ -1,8 +1,24 @@
// reci.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#pragma once
-#include "storage.h"
+#include "diskloc.h"
namespace mongo {
diff --git a/db/recstore.h b/db/recstore.h
index 2e6a90a..bdb3d77 100644
--- a/db/recstore.h
+++ b/db/recstore.h
@@ -1,108 +1,124 @@
-// recstore.h
-
-#pragma once
-
-#include "../util/file.h"
-
-namespace mongo {
-
-using boost::uint32_t;
-using boost::uint64_t;
-
-/* Current version supports only consistent record sizes within a store. */
-
-class BasicRecStore {
- struct RecStoreHeader {
- uint32_t version;
- uint32_t recsize;
- uint64_t leof; // logical eof, actual file might be prealloc'd further
- uint64_t firstDeleted; // 0 = no deleted recs
- uint32_t cleanShutdown; // 0 = clean
- char reserved[8192-8-8-4-4-4]; // we want our records page-aligned in the file if they are a multiple of a page's size -- so we make this 8KB with that goal
- RecStoreHeader() {
- version = 65;
- recsize = 0;
- leof = sizeof(RecStoreHeader);
- firstDeleted = 0;
- cleanShutdown = 1;
- memset(reserved, 0, sizeof(reserved));
- }
- };
-
-public:
- BasicRecStore(int _fileNumber) : fileNumber(_fileNumber) { }
- ~BasicRecStore();
- void init(const char *fn, unsigned recsize);
- fileofs insert(const char *buf, unsigned len);
- void update(fileofs o, const char *buf, unsigned len);
- void remove(fileofs o, unsigned len);
- void get(fileofs o, char *buf, unsigned len);
-
- int fileNumber; // this goes in DiskLoc::a
-
- string filename;
-
-private:
-
- void writeHeader();
- File f;
- fileofs len;
- RecStoreHeader h; // h.reserved is wasteful here; fix later.
- void write(fileofs ofs, const char *data, unsigned len) {
- f.write(ofs, data, len);
- massert( 10380 , "basicrecstore write io error", !f.bad());
- }
-};
-
-/* --- implementation --- */
-
-inline BasicRecStore::~BasicRecStore() {
- h.cleanShutdown = 0;
- if( f.is_open() ) {
- writeHeader();
- f.fsync();
- }
-}
-
-inline void BasicRecStore::writeHeader() {
- write(0, (const char *) &h, 28); // update header in file for new leof
- uassert( 10115 , "file io error in BasicRecStore [1]", !f.bad());
-}
-
-inline fileofs BasicRecStore::insert(const char *buf, unsigned reclen) {
- if( h.firstDeleted ) {
- uasserted(11500, "deleted not yet implemented recstoreinsert");
- }
- massert( 10381 , "bad len", reclen == h.recsize);
- fileofs ofs = h.leof;
- h.leof += reclen;
- if( h.leof > len ) {
- // grow the file. we grow quite a bit to avoid excessive file system fragmentations
- len += (len / 8) + h.recsize;
- uassert( 10116 , "recstore file too big for 32 bit", len <= 0x7fffffff || sizeof(std::streamoff) > 4 );
- write(len, "", 0);
- }
- writeHeader();
- write(ofs, buf, reclen);
- uassert( 10117 , "file io error in BasicRecStore [2]", !f.bad());
- return ofs;
-}
-
-/* so far, it's ok to read or update a subset of a record */
-
-inline void BasicRecStore::update(fileofs o, const char *buf, unsigned len) {
- assert(o <= h.leof && o >= sizeof(RecStoreHeader));
- write(o, buf, len);
-}
-
-inline void BasicRecStore::get(fileofs o, char *buf, unsigned len) {
- assert(o <= h.leof && o >= sizeof(RecStoreHeader));
- f.read(o, buf, len);
- massert( 10382 , "basicrestore::get I/O error", !f.bad());
-}
-
-inline void BasicRecStore::remove(fileofs o, unsigned len) {
- uasserted(11501, "not yet implemented recstoreremove");
-}
-
-}
+// recstore.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "../util/file.h"
+
+namespace mongo {
+
+using boost::uint32_t;
+using boost::uint64_t;
+
+/* Current version supports only consistent record sizes within a store. */
+
+class BasicRecStore {
+ struct RecStoreHeader {
+ uint32_t version;
+ uint32_t recsize;
+ uint64_t leof; // logical eof, actual file might be prealloc'd further
+ uint64_t firstDeleted; // 0 = no deleted recs
+ uint32_t cleanShutdown; // 0 = clean
+ char reserved[8192-8-8-4-4-4]; // we want our records page-aligned in the file if they are a multiple of a page's size -- so we make this 8KB with that goal
+ RecStoreHeader() {
+ version = 65;
+ recsize = 0;
+ leof = sizeof(RecStoreHeader);
+ firstDeleted = 0;
+ cleanShutdown = 1;
+ memset(reserved, 0, sizeof(reserved));
+ }
+ };
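+    // The header is exactly 8192 bytes: 28 bytes of fields (4+4+8+8+4) plus 8164 reserved.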
+
+public:
+ BasicRecStore(int _fileNumber) : fileNumber(_fileNumber) { }
+ ~BasicRecStore();
+ void init(const char *fn, unsigned recsize);
+ fileofs insert(const char *buf, unsigned len);
+ void update(fileofs o, const char *buf, unsigned len);
+ void remove(fileofs o, unsigned len);
+ void get(fileofs o, char *buf, unsigned len);
+
+ int fileNumber; // this goes in DiskLoc::a
+
+ string filename;
+
+private:
+
+ void writeHeader();
+ File f;
+ fileofs len;
+ RecStoreHeader h; // h.reserved is wasteful here; fix later.
+ void write(fileofs ofs, const char *data, unsigned len) {
+ f.write(ofs, data, len);
+ massert( 10380 , "basicrecstore write io error", !f.bad());
+ }
+};
+
+/* --- implementation --- */
+
+inline BasicRecStore::~BasicRecStore() {
+ h.cleanShutdown = 0;
+ if( f.is_open() ) {
+ writeHeader();
+ f.fsync();
+ }
+}
+
+inline void BasicRecStore::writeHeader() {
+ write(0, (const char *) &h, 28); // update header in file for new leof
+ uassert( 10115 , "file io error in BasicRecStore [1]", !f.bad());
+}
+
+inline fileofs BasicRecStore::insert(const char *buf, unsigned reclen) {
+ if( h.firstDeleted ) {
+ uasserted(11500, "deleted not yet implemented recstoreinsert");
+ }
+ massert( 10381 , "bad len", reclen == h.recsize);
+ fileofs ofs = h.leof;
+ h.leof += reclen;
+ if( h.leof > len ) {
+        // grow the file. we grow quite a bit to avoid excessive file system fragmentation
+ len += (len / 8) + h.recsize;
+ uassert( 10116 , "recstore file too big for 32 bit", len <= 0x7fffffff || sizeof(std::streamoff) > 4 );
+ write(len, "", 0);
+ }
+ writeHeader();
+ write(ofs, buf, reclen);
+ uassert( 10117 , "file io error in BasicRecStore [2]", !f.bad());
+ return ofs;
+}
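+// Growth policy: when leof passes the preallocated length the file grows by len/8 + recsize,
+// e.g. a 64MB store is extended by roughly 8MB at a time.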
+
+/* so far, it's ok to read or update a subset of a record */
+
+inline void BasicRecStore::update(fileofs o, const char *buf, unsigned len) {
+ assert(o <= h.leof && o >= sizeof(RecStoreHeader));
+ write(o, buf, len);
+}
+
+inline void BasicRecStore::get(fileofs o, char *buf, unsigned len) {
+ assert(o <= h.leof && o >= sizeof(RecStoreHeader));
+ f.read(o, buf, len);
+    massert( 10382 , "basicrecstore::get I/O error", !f.bad());
+}
+
+inline void BasicRecStore::remove(fileofs o, unsigned len) {
+ uasserted(11501, "not yet implemented recstoreremove");
+}
+
+}
diff --git a/db/repl.cpp b/db/repl.cpp
index 04c8d73..62b2986 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -1,10 +1,8 @@
// repl.cpp
/* TODO
-
PAIRING
_ on a syncexception, don't allow going back to master state?
-
*/
/**
@@ -39,6 +37,7 @@
#include "repl.h"
#include "../util/message.h"
#include "../client/dbclient.h"
+#include "../client/connpool.h"
#include "pdfile.h"
#include "query.h"
#include "db.h"
@@ -47,6 +46,9 @@
#include "cmdline.h"
namespace mongo {
+
+ // our config from command line etc.
+ ReplSettings replSettings;
void ensureHaveIdIndex(const char *ns);
@@ -63,11 +65,12 @@ namespace mongo {
*/
const char *replAllDead = 0;
- extern bool autoresync;
time_t lastForcedResync = 0;
IdTracker &idTracker = *( new IdTracker() );
+ int __findingStartInitialTimeout = 5; // configurable for testing
+
} // namespace mongo
#include "replset.h"
@@ -137,6 +140,7 @@ namespace mongo {
virtual bool logTheOp() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdReplacePeer() : Command("replacepeer") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( replPair == 0 ) {
@@ -196,9 +200,12 @@ namespace mongo {
virtual bool logTheOp() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdForceDead() : Command("forcedead") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- replAllDead = "forced by command";
+ replAllDead = "replication forced to stop by 'forcedead' command";
+ log() << "*********************************************************\n";
+ log() << "received 'forcedead' command, replication forced to stop" << endl;
return true;
}
} cmdForceDead;
@@ -215,6 +222,7 @@ namespace mongo {
virtual bool logTheOp() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdResync() : Command("resync") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if ( cmdObj.getBoolField( "force" ) ) {
@@ -253,12 +261,85 @@ namespace mongo {
}
} cmdResync;
+ bool anyReplEnabled(){
+ return replPair || replSettings.slave || replSettings.master;
+ }
+
+ void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level ){
+
+ if ( replAllDead ) {
+ result.append("ismaster", 0.0);
+ if( authed ) {
+ if ( replPair )
+ result.append("remote", replPair->remote);
+ }
+ string s = string("dead: ") + replAllDead;
+ result.append("info", s);
+ }
+ else if ( replPair ) {
+ result.append("ismaster", replPair->state);
+ if( authed ) {
+ result.append("remote", replPair->remote);
+ if ( !replPair->info.empty() )
+ result.append("info", replPair->info);
+ }
+ }
+ else {
+ result.append("ismaster", replSettings.slave ? 0 : 1);
+ result.append("msg", "not paired");
+ }
+
+ if ( level ){
+ BSONObjBuilder sources( result.subarrayStart( "sources" ) );
+
+ readlock lk( "local.sources" );
+ Client::Context ctx( "local.sources" );
+ auto_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
+ int n = 0;
+ while ( c->ok() ){
+ BSONObj s = c->current();
+
+ BSONObjBuilder bb;
+ bb.append( s["host"] );
+ string sourcename = s["source"].valuestr();
+ if ( sourcename != "main" )
+ bb.append( s["source"] );
+
+ {
+ BSONElement e = s["syncedTo"];
+ BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
+ t.appendDate( "time" , e.timestampTime() );
+ t.append( "inc" , e.timestampInc() );
+ t.done();
+ }
+
+ if ( level > 1 ){
+ dbtemprelease unlock;
+ ScopedDbConnection conn( s["host"].valuestr() );
+ BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename , Query().sort( BSON( "$natural" << 1 ) ) );
+ BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename , Query().sort( BSON( "$natural" << -1 ) ) );
+ bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
+ bb.appendDate( "masterLast" , last["ts"].timestampTime() );
+ double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
+ bb.append( "lagSeconds" , lag / 1000 );
+ conn.done();
+ }
+
+ sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
+ c->advance();
+ }
+
+ sources.done();
+ }
+ }
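+    // level >= 1 reports each local.sources entry with its syncedTo time; level > 1 also
+    // contacts each master to read its oplog bounds and compute lagSeconds.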
+
class CmdIsMaster : public Command {
public:
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return NONE; }
CmdIsMaster() : Command("ismaster") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
/* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
@@ -266,30 +347,9 @@ namespace mongo {
we allow unauthenticated ismaster but we aren't as verbose informationally if
one is not authenticated for admin db to be safe.
*/
- AuthenticationInfo *ai = currentClient.get()->ai;
- bool authed = ai->isAuthorized("admin");
-
- if ( replAllDead ) {
- result.append("ismaster", 0.0);
- if( authed ) {
- if ( replPair )
- result.append("remote", replPair->remote);
- result.append("info", replAllDead);
- }
- }
- else if ( replPair ) {
- result.append("ismaster", replPair->state);
- if( authed ) {
- result.append("remote", replPair->remote);
- if ( !replPair->info.empty() )
- result.append("info", replPair->info);
- }
- }
- else {
- result.append("ismaster", slave ? 0 : 1);
- result.append("msg", "not paired");
- }
+ bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
+ appendReplicationInfo( result , authed );
return true;
}
} cmdismaster;
@@ -300,6 +360,7 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return WRITE; }
CmdIsInitialSyncComplete() : Command( "isinitialsynccomplete" ) {}
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
result.appendBool( "initialsynccomplete", getInitialSyncCompleted() );
@@ -333,7 +394,7 @@ namespace mongo {
virtual bool adminOnly() {
return true;
}
-
+ virtual LockType locktype(){ return WRITE; }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
if ( replPair == 0 ) {
massert( 10383 , "Another mongod instance believes incorrectly that this node is its peer", !cmdObj.getBoolField( "fromArbiter" ) );
@@ -541,12 +602,13 @@ namespace mongo {
BSONObj o = jsobj();
log( 1 ) << "Saving repl source: " << o << endl;
- OpDebug debug;
- setClient("local.sources");
- UpdateResult res = updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, false,false,debug);
- assert( ! res.mod );
- assert( res.num == 1 );
- cc().clearns();
+ {
+ OpDebug debug;
+ Client::Context ctx("local.sources");
+ UpdateResult res = updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, false,false,debug);
+ assert( ! res.mod );
+ assert( res.num == 1 );
+ }
if ( replacing ) {
/* if we were in "replace" mode, we now have synced up with the replacement,
@@ -578,13 +640,13 @@ namespace mongo {
and cursor in effect.
*/
void ReplSource::loadAll(SourceVector &v) {
+ Client::Context ctx("local.sources");
SourceVector old = v;
v.clear();
bool gotPairWith = false;
if ( !cmdLine.source.empty() ) {
- setClient("local.sources");
// --source <host> specified.
// check that no items are in sources other than that
// add if missing
@@ -594,8 +656,8 @@ namespace mongo {
n++;
ReplSource tmp(c->current());
if ( tmp.hostName != cmdLine.source ) {
- log() << "--source " << cmdLine.source << " != " << tmp.hostName << " from local.sources collection" << endl;
- log() << "terminating after 30 seconds" << endl;
+ log() << "repl: --source " << cmdLine.source << " != " << tmp.hostName << " from local.sources collection" << endl;
+ log() << "repl: terminating mongod after 30 seconds" << endl;
sleepsecs(30);
dbexit( EXIT_REPLICATION_ERROR );
}
@@ -626,8 +688,10 @@ namespace mongo {
if ( replPair ) {
const string &remote = replPair->remote;
- setClient( "local.sources" );
// --pairwith host specified.
+ if ( replSettings.fastsync ) {
+ Helpers::emptyCollection( "local.sources" ); // ignore saved sources
+ }
// check that no items are in sources other than that
// add if missing
auto_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
@@ -652,7 +716,6 @@ namespace mongo {
}
}
- setClient("local.sources");
auto_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
while ( c->ok() ) {
ReplSource tmp(c->current());
@@ -664,11 +727,21 @@ namespace mongo {
tmp.syncedTo = OpTime();
tmp.replacing = true;
}
+ }
+ if ( ( !replPair && tmp.syncedTo.isNull() ) ||
+ ( replPair && replSettings.fastsync ) ) {
+ DBDirectClient c;
+ if ( c.exists( "local.oplog.$main" ) ) {
+ BSONObj op = c.findOne( "local.oplog.$main", Query().sort( BSON( "$natural" << -1 ) ) );
+ if ( !op.isEmpty() ) {
+ tmp.syncedTo = op[ "ts" ].date();
+ tmp._lastSavedLocalTs = op[ "ts" ].date();
+ }
+ }
}
addSourceToList(v, tmp, c->current(), old);
c->advance();
}
- cc().clearns();
if ( !gotPairWith && replPair ) {
/* add the --pairwith server */
@@ -732,7 +805,7 @@ namespace mongo {
string ReplSource::resyncDrop( const char *db, const char *requester ) {
log() << "resync: dropping database " << db << endl;
string dummyns = string( db ) + ".";
- setClient(dummyns.c_str());
+ Client::Context ctx(dummyns);
assert( cc().database()->name == db );
dropDatabase(dummyns.c_str());
return dummyns;
@@ -741,9 +814,9 @@ namespace mongo {
/* grab initial copy of a database from the master */
bool ReplSource::resync(string db) {
string dummyNs = resyncDrop( db.c_str(), "internal" );
- setClient( dummyNs.c_str() );
+ Client::Context ctx( dummyNs );
{
- log() << "resync: cloning database " << db << endl;
+ log() << "resync: cloning database " << db << " to get an initial copy" << endl;
ReplInfo r("resync: cloning a database");
string errmsg;
bool ok = cloneFrom(hostName.c_str(), errmsg, cc().database()->name, false, /*slaveok*/ true, /*replauth*/ true, /*snapshot*/false);
@@ -753,7 +826,7 @@ namespace mongo {
}
}
- log() << "resync: done " << db << endl;
+ log() << "resync: done with initial clone for db: " << db << endl;
return true;
}
@@ -864,29 +937,21 @@ namespace mongo {
throw SyncException();
}
- bool justCreated;
- try {
- justCreated = setClient(ns);
- } catch ( AssertionException& ) {
- problem() << "skipping bad(?) op in oplog, setClient() failed, ns: '" << ns << "'\n";
- addDbNextPass.erase(clientName);
- return;
- }
+ Client::Context ctx( ns );
- bool empty = cc().database()->isEmpty();
+ bool empty = ctx.db()->isEmpty();
bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
- log( 6 ) << "ns: " << ns << ", justCreated: " << justCreated << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
-
- // always apply admin command command
- // this is a bit hacky -- the semantics of replication/commands aren't well specified
- if ( strcmp( clientName, "admin" ) == 0 && *op.getStringField( "op" ) == 'c' ) {
- applyOperation( op );
- cc().clearns();
- return;
- }
+ log( 6 ) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
- if ( justCreated || empty || incompleteClone ) {
+            // always apply admin commands
+ // this is a bit hacky -- the semantics of replication/commands aren't well specified
+ if ( strcmp( clientName, "admin" ) == 0 && *op.getStringField( "op" ) == 'c' ) {
+ applyOperation( op );
+ return;
+ }
+
+ if ( ctx.justCreated() || empty || incompleteClone ) {
// we must add to incomplete list now that setClient has been called
incompleteCloneDbs.insert( clientName );
if ( nClonedThisPass ) {
@@ -901,9 +966,9 @@ namespace mongo {
log() << "An earlier initial clone of '" << clientName << "' did not complete, now resyncing." << endl;
}
save();
- setClient( ns );
+ Client::Context ctx(ns);
nClonedThisPass++;
- resync(cc().database()->name);
+ resync(ctx.db()->name);
addDbNextPass.erase(clientName);
incompleteCloneDbs.erase( clientName );
}
@@ -927,7 +992,6 @@ namespace mongo {
}
addDbNextPass.erase( clientName );
}
- cc().clearns();
}
BSONObj ReplSource::idForOp( const BSONObj &op, bool &mod ) {
@@ -981,16 +1045,20 @@ namespace mongo {
void ReplSource::syncToTailOfRemoteLog() {
string _ns = ns();
- BSONObj last = conn->findOne( _ns.c_str(), Query().sort( BSON( "$natural" << -1 ) ) );
+ BSONObjBuilder b;
+ if ( !only.empty() ) {
+ b.appendRegex("ns", string("^") + only);
+ }
+ BSONObj last = conn->findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
if ( !last.isEmpty() ) {
- BSONElement ts = last.findElement( "ts" );
+ BSONElement ts = last.getField( "ts" );
massert( 10386 , "non Date ts found", ts.type() == Date || ts.type() == Timestamp );
syncedTo = OpTime( ts.date() );
}
}
OpTime ReplSource::nextLastSavedLocalTs() const {
- setClient( "local.oplog.$main" );
+ Client::Context ctx( "local.oplog.$main" );
auto_ptr< Cursor > c = findTableScan( "local.oplog.$main", BSON( "$natural" << -1 ) );
if ( c->ok() )
return OpTime( c->current().getField( "ts" ).date() );
@@ -1003,7 +1071,10 @@ namespace mongo {
}
void ReplSource::resetSlave() {
- massert( 10387 , "request to kill slave replication falied",
+ log() << "**********************************************************\n";
+ log() << "Sending forcedead command to slave to stop its replication\n";
+ log() << "Host: " << hostName << " paired: " << paired << endl;
+ massert( 10387 , "request to kill slave replication failed",
conn->simpleCommand( "admin", 0, "forcedead" ) );
syncToTailOfRemoteLog();
{
@@ -1015,7 +1086,7 @@ namespace mongo {
}
bool ReplSource::updateSetsWithLocalOps( OpTime &localLogTail, bool mayUnlock ) {
- setClient( "local.oplog.$main" );
+ Client::Context ctx( "local.oplog.$main" );
auto_ptr< Cursor > localLog = findTableScan( "local.oplog.$main", BSON( "$natural" << -1 ) );
OpTime newTail;
for( ; localLog->ok(); localLog->advance() ) {
@@ -1153,67 +1224,70 @@ namespace mongo {
}
return true;
}
-
- int n = 0;
- BSONObj op = c->next();
- BSONElement ts = op.findElement("ts");
- if ( ts.type() != Date && ts.type() != Timestamp ) {
- string err = op.getStringField("$err");
- if ( !err.empty() ) {
- problem() << "repl: $err reading remote oplog: " + err << '\n';
- massert( 10390 , "got $err reading remote oplog", false );
- }
- else {
- problem() << "repl: bad object read from remote oplog: " << op.toString() << '\n';
- massert( 10391 , "repl: bad object read from remote oplog", false);
+
+ OpTime nextOpTime;
+ {
+ BSONObj op = c->next();
+ BSONElement ts = op.getField("ts");
+ if ( ts.type() != Date && ts.type() != Timestamp ) {
+ string err = op.getStringField("$err");
+ if ( !err.empty() ) {
+ problem() << "repl: $err reading remote oplog: " + err << '\n';
+ massert( 10390 , "got $err reading remote oplog", false );
+ }
+ else {
+ problem() << "repl: bad object read from remote oplog: " << op.toString() << '\n';
+ massert( 10391 , "repl: bad object read from remote oplog", false);
+ }
}
- }
- if ( replPair && replPair->state == ReplPair::State_Master ) {
+ if ( replPair && replPair->state == ReplPair::State_Master ) {
- OpTime nextOpTime( ts.date() );
- if ( !tailing && !initial && nextOpTime != syncedTo ) {
- log() << "remote slave log filled, forcing slave resync" << endl;
- resetSlave();
- return true;
- }
+ OpTime next( ts.date() );
+ if ( !tailing && !initial && next != syncedTo ) {
+ log() << "remote slave log filled, forcing slave resync" << endl;
+ resetSlave();
+ return true;
+ }
- dblock lk;
- updateSetsWithLocalOps( localLogTail, true );
- }
+ dblock lk;
+ updateSetsWithLocalOps( localLogTail, true );
+ }
- OpTime nextOpTime( ts.date() );
- log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
- if ( tailing || initial ) {
- if ( initial )
- log(1) << "repl: initial run\n";
- else
+ nextOpTime = OpTime( ts.date() );
+ log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
+ if ( tailing || initial ) {
+ if ( initial )
+ log(1) << "repl: initial run\n";
+ else
+ assert( syncedTo < nextOpTime );
+ c->putBack( op ); // op will be processed in the loop below
+ nextOpTime = OpTime(); // will reread the op below
+ }
+ else if ( nextOpTime != syncedTo ) { // didn't get what we queried for - error
+ Nullstream& l = log();
+ l << "repl: nextOpTime " << nextOpTime.toStringLong() << ' ';
+ if ( nextOpTime < syncedTo )
+ l << "<??";
+ else
+ l << ">";
+
+ l << " syncedTo " << syncedTo.toStringLong() << '\n';
+ log() << "repl: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
+ log() << "repl: tailing: " << tailing << '\n';
+ log() << "repl: data too stale, halting replication" << endl;
+ replInfo = replAllDead = "data too stale halted replication";
assert( syncedTo < nextOpTime );
- sync_pullOpLog_applyOperation(op, &localLogTail);
- n++;
- }
- else if ( nextOpTime != syncedTo ) {
- Nullstream& l = log();
- l << "repl: nextOpTime " << nextOpTime.toStringLong() << ' ';
- if ( nextOpTime < syncedTo )
- l << "<??";
- else
- l << ">";
-
- l << " syncedTo " << syncedTo.toStringLong() << '\n';
- log() << "repl: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
- log() << "repl: tailing: " << tailing << '\n';
- log() << "repl: data too stale, halting replication" << endl;
- replInfo = replAllDead = "data too stale halted replication";
- assert( syncedTo < nextOpTime );
- throw SyncException();
- }
- else {
- /* t == syncedTo, so the first op was applied previously. */
+ throw SyncException();
+ }
+ else {
+ /* t == syncedTo, so the first op was applied previously. */
+ }
}
// apply operations
{
+ int n = 0;
time_t saveLast = time(0);
while ( 1 ) {
/* from a.s.:
@@ -1232,7 +1306,7 @@ namespace mongo {
*/
if ( !c->more() ) {
dblock lk;
- OpTime nextLastSaved = nextLastSavedLocalTs(); // this may make c->more() become true
+ OpTime nextLastSaved = nextLastSavedLocalTs();
{
dbtemprelease t;
if ( c->more() ) {
@@ -1245,11 +1319,11 @@ namespace mongo {
save(); // note how far we are synced up to now
log() << "repl: applied " << n << " operations" << endl;
nApplied = n;
- log() << "repl: end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() << endl;
+ log() << "repl: end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() << endl;
break;
}
- OCCASIONALLY if( n > 100000 || time(0) - saveLast > 60 ) {
+ OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
// periodically note our progress, in case we are doing a lot of work and crash
dblock lk;
syncedTo = nextOpTime;
@@ -1262,14 +1336,36 @@ namespace mongo {
}
BSONObj op = c->next();
- ts = op.findElement("ts");
- assert( ts.type() == Date || ts.type() == Timestamp );
+ BSONElement ts = op.getField("ts");
+ if( !( ts.type() == Date || ts.type() == Timestamp ) ) {
+ log() << "sync error: problem querying remote oplog record\n";
+ log() << "op: " << op.toString() << '\n';
+ log() << "halting replication" << endl;
+ replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
+ throw SyncException();
+ }
OpTime last = nextOpTime;
- OpTime tmp( ts.date() );
- nextOpTime = tmp;
+ nextOpTime = OpTime( ts.date() );
if ( !( last < nextOpTime ) ) {
- problem() << "sync error: last " << last.toString() << " >= nextOpTime " << nextOpTime.toString() << endl;
- uassert( 10123 , "bad 'ts' value in sources", false);
+ log() << "sync error: last applied optime at slave >= nextOpTime from master" << endl;
+ log() << " last: " << last.toStringLong() << '\n';
+ log() << " nextOpTime: " << nextOpTime.toStringLong() << '\n';
+ log() << " halting replication" << endl;
+ replInfo = replAllDead = "sync error last >= nextOpTime";
+ uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false);
+ }
+ if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
+ c->putBack( op );
+ _sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
+ dblock lk;
+ if ( n > 0 ) {
+ syncedTo = last;
+ save();
+ }
+ log() << "repl: applied " << n << " operations" << endl;
+ log() << "repl: syncedTo: " << syncedTo.toStringLong() << endl;
+ log() << "waiting until: " << _sleepAdviceTime << " to continue" << endl;
+ break;
}
sync_pullOpLog_applyOperation(op, &localLogTail);
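
The new slavedelay branch above defers any op whose timestamp is less than slavedelay seconds old: the op is pushed back onto the cursor, progress so far is saved, and _sleepAdviceTime tells the caller when to retry. A standalone rendering of that arithmetic (all values illustrative):

    #include <iostream>

    int main() {
        unsigned slavedelay = 600;       // lag the slave by ten minutes
        unsigned opSecs     = 1000000;   // seconds field of the op's ts
        unsigned now        = 1000300;   // pretend time(0)
        if ( now < opSecs + slavedelay ) {
            unsigned wake = opSecs + slavedelay + 1;    // _sleepAdviceTime
            std::cout << "defer; retry in " << ( wake - now )
                      << "s" << std::endl;              // prints 301s
        }
    }
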
@@ -1283,8 +1379,7 @@ namespace mongo {
BSONObj userReplQuery = fromjson("{\"user\":\"repl\"}");
bool replAuthenticate(DBClientConnection *conn) {
- AuthenticationInfo *ai = currentClient.get()->ai;
- if( !ai->isAuthorized("admin") ) {
+ if( ! cc().isAdmin() ){
log() << "replauthenticate: requires admin permissions, failing\n";
return false;
}
@@ -1324,7 +1419,7 @@ namespace mongo {
ReplInfo r("trying to connect to sync source");
if ( !conn->connect(hostName.c_str(), errmsg) || !replAuthenticate(conn.get()) ) {
resetConnection();
- log() << "repl: " << errmsg << endl;
+ log() << "repl: " << errmsg << endl;
return false;
}
}
@@ -1335,9 +1430,16 @@ namespace mongo {
returns true if everything happy. return false if you want to reconnect.
*/
bool ReplSource::sync(int& nApplied) {
+ _sleepAdviceTime = 0;
ReplInfo r("sync");
- if ( !cmdLine.quiet )
- log() << "repl: " << sourceName() << '@' << hostName << endl;
+ if ( !cmdLine.quiet ) {
+ Nullstream& l = log();
+ l << "repl: from ";
+ if( sourceName() != "main" ) {
+ l << "source:" << sourceName() << ' ';
+ }
+ l << "host:" << hostName << endl;
+ }
nClonedThisPass = 0;
// FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
@@ -1348,13 +1450,11 @@ namespace mongo {
}
if ( !connect() ) {
+ log(4) << "repl: can't connect to sync source" << endl;
if ( replPair && paired ) {
assert( startsWith(hostName.c_str(), replPair->remoteHost.c_str()) );
replPair->arbitrate();
}
- {
- ReplInfo r("can't connect to sync source");
- }
return false;
}
@@ -1370,7 +1470,7 @@ namespace mongo {
/*
// get current mtime at the server.
BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
- BSONElement e = o.findElement("optime");
+ BSONElement e = o.getField("optime");
if( e.eoo() ) {
log() << "repl: failed to get cur optime from master" << endl;
log() << " " << o.toString() << endl;
@@ -1387,24 +1487,13 @@ namespace mongo {
// cached copies of these...so don't rename them
NamespaceDetails *localOplogMainDetails = 0;
- Database *localOplogClient = 0;
+ Database *localOplogDB = 0;
+
+ void replCheckCloseDatabase( Database * db ){
+ localOplogDB = 0;
+ localOplogMainDetails = 0;
+ }
- void logOp(const char *opstr, const char *ns, const BSONObj& obj, BSONObj *patt, bool *b) {
- if ( master ) {
- _logOp(opstr, ns, "local.oplog.$main", obj, patt, b, OpTime::now());
- char cl[ 256 ];
- nsToDatabase( ns, cl );
- }
- NamespaceDetailsTransient &t = NamespaceDetailsTransient::get_w( ns );
- if ( t.cllEnabled() ) {
- try {
- _logOp(opstr, ns, t.cllNS().c_str(), obj, patt, b, OpTime::now());
- } catch ( const DBException & ) {
- t.cllInvalidate();
- }
- }
- }
-
/* we write to local.oplog.$main:
{ ts : ..., op: ..., ns: ..., o: ... }
ts: an OpTime timestamp
@@ -1415,6 +1504,7 @@ namespace mongo {
"c" db cmd
"db" declares presence of a database (ns is set to the db name + '.')
"n" no op
+ logNS - e.g. "local.oplog.$main"
bb:
if not null, specifies a boolean to pass along to the other side as b: param.
used for "justOne" or "upsert" flags on 'd', 'u'
@@ -1422,7 +1512,7 @@ namespace mongo {
when set, indicates this is the first thing we have logged for this database.
thus, the slave does not need to copy down all the data when it sees this.
*/
- void _logOp(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, const OpTime &ts ) {
+ static void _logOp(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, const OpTime &ts ) {
if ( strncmp(ns, "local.", 6) == 0 )
return;
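
For reference, an illustrative entry matching the format documented above (field values made up; "i" is the insert opstr):

    // { ts : Timestamp(1267123200, 1),     // OpTime: seconds + counter
    //   op : "i",                          // an insert
    //   ns : "test.people",
    //   o  : { _id : ..., name : "sara" } }
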
@@ -1449,14 +1539,14 @@ namespace mongo {
Record *r;
if ( strncmp( logNS, "local.", 6 ) == 0 ) { // For now, assume this is oplog main
if ( localOplogMainDetails == 0 ) {
- setClient("local.");
- localOplogClient = cc().database();
+ Client::Context ctx("local.", dbpath, 0, false);
+ localOplogDB = ctx.db();
localOplogMainDetails = nsdetails(logNS);
}
- cc().setns("", localOplogClient); // database = localOplogClient;
+ Client::Context ctx( "" , localOplogDB, false );
r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
} else {
- setClient( logNS );
+ Client::Context ctx( logNS, dbpath, 0, false );
assert( nsdetails( logNS ) );
r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
}
@@ -1478,6 +1568,27 @@ namespace mongo {
}
}
+ static void logKeepalive() {
+ BSONObj obj;
+ _logOp("n", "", "local.oplog.$main", obj, 0, 0, OpTime::now());
+ }
+
+ void logOp(const char *opstr, const char *ns, const BSONObj& obj, BSONObj *patt, bool *b) {
+ if ( replSettings.master ) {
+ _logOp(opstr, ns, "local.oplog.$main", obj, patt, b, OpTime::now());
+ char cl[ 256 ];
+ nsToDatabase( ns, cl );
+ }
+ NamespaceDetailsTransient &t = NamespaceDetailsTransient::get_w( ns );
+ if ( t.cllEnabled() ) {
+ try {
+ _logOp(opstr, ns, t.cllNS().c_str(), obj, patt, b, OpTime::now());
+ } catch ( const DBException & ) {
+ t.cllInvalidate();
+ }
+ }
+ }
+
/* --------------------------------------------------------------*/
/*
@@ -1517,6 +1628,9 @@ namespace mongo {
else if( moreToSync ) {
sleepAdvice = 0;
}
+ else if ( s->sleepAdvice() ) {
+ sleepAdvice = s->sleepAdvice();
+ }
if ( ok && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
pairSync->setInitialSyncCompletedLocking();
}
@@ -1560,10 +1674,10 @@ namespace mongo {
{
dblock lk;
if ( replAllDead ) {
- if ( !autoresync || !ReplSource::throttledForceResyncDead( "auto" ) )
+ if ( !replSettings.autoresync || !ReplSource::throttledForceResyncDead( "auto" ) )
break;
}
- assert( syncing == 0 );
+ assert( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
syncing++;
}
try {
@@ -1590,7 +1704,8 @@ namespace mongo {
stringstream ss;
ss << "repl: sleep " << s << "sec before next pass";
string msg = ss.str();
- log() << msg << endl;
+ if ( ! cmdLine.quiet )
+ log() << msg << endl;
ReplInfo r(msg.c_str());
sleepsecs(s);
}
@@ -1599,14 +1714,38 @@ namespace mongo {
int debug_stop_repl = 0;
+ static void replMasterThread() {
+ sleepsecs(4);
+ Client::initThread("replmaster");
+ while( 1 ) {
+ {
+ dblock lk;
+ cc().getAuthenticationInfo()->authorize("admin");
+ }
+ sleepsecs(10);
+ /* write a keep-alive like entry to the log. this will make things like
+ printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date
+ even when things are idle.
+ */
+ {
+ writelock lk("");
+ try {
+ logKeepalive();
+ }
+ catch(...) {
+ log() << "caught exception in replMasterThread()" << endl;
+ }
+ }
+ }
+ }
+
void replSlaveThread() {
sleepsecs(1);
-
+ Client::initThread("replslave");
+
{
dblock lk;
-
- Client::initThread("replslave");
- currentClient.get()->ai->authorize("admin");
+ cc().getAuthenticationInfo()->authorize("admin");
BSONObj obj;
if ( Helpers::getSingleton("local.pair.startup", obj) ) {
@@ -1642,10 +1781,16 @@ namespace mongo {
dblock lk;
const char * ns = "local.oplog.$main";
- setClient(ns);
+ Client::Context ctx(ns);
- if ( nsdetails( ns ) )
+ if ( nsdetails( ns ) ) {
+ DBDirectClient c;
+ BSONObj lastOp = c.findOne( ns, Query().sort( BSON( "$natural" << -1 ) ) );
+ if ( !lastOp.isEmpty() ) {
+ OpTime::setLast( lastOp[ "ts" ].date() );
+ }
return;
+ }
/* create an oplog collection, if it doesn't yet exist. */
BSONObjBuilder b;
@@ -1653,13 +1798,19 @@ namespace mongo {
if ( cmdLine.oplogSize != 0 )
sz = (double)cmdLine.oplogSize;
else {
+ /* not specified. pick a default size */
sz = 50.0 * 1000 * 1000;
if ( sizeof(int *) >= 8 ) {
+#if defined(__APPLE__)
+ // typically these are desktops (dev machines), so keep it smallish
+ sz = (256-64) * 1000 * 1000;
+#else
sz = 990.0 * 1000 * 1000;
boost::intmax_t free = freeSpace(); //-1 if call not supported.
double fivePct = free * 0.05;
if ( fivePct > sz )
sz = fivePct;
+#endif
}
}
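
Worked through, the sizing rule above gives 50MB on 32-bit builds, 192MB on 64-bit Apple builds, and on other 64-bit platforms the larger of 990MB and 5% of free disk. A standalone rendering (the free-space figure is made up):

    #include <iostream>

    int main() {
        double sz = 50.0 * 1000 * 1000;                 // 32-bit: 50MB
        if ( sizeof(int *) >= 8 ) {                     // 64-bit build
            sz = 990.0 * 1000 * 1000;                   // baseline ~1GB
            long long free = 400000LL * 1000 * 1000;    // pretend freeSpace()
            double fivePct = free * 0.05;               // 5% of free disk
            if ( fivePct > sz )
                sz = fivePct;                           // here 20GB wins
        }
        std::cout << sz << " bytes" << std::endl;
    }
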
@@ -1675,7 +1826,6 @@ namespace mongo {
BSONObj o = b.done();
userCreateNS(ns, o, err, false);
logOp( "n", "dummy", BSONObj() );
- cc().clearns();
}
void startReplication() {
@@ -1684,29 +1834,31 @@ namespace mongo {
*/
//boost::thread tempt(tempThread);
- if ( !slave && !master && !replPair )
+ if ( !replSettings.slave && !replSettings.master && !replPair )
return;
{
dblock lk;
+ cc().getAuthenticationInfo()->authorize("admin");
pairSync->init();
}
- if ( slave || replPair ) {
- if ( slave ) {
- assert( slave == SimpleSlave );
+ if ( replSettings.slave || replPair ) {
+ if ( replSettings.slave ) {
+ assert( replSettings.slave == SimpleSlave );
log(1) << "slave=true" << endl;
}
else
- slave = ReplPairSlave;
+ replSettings.slave = ReplPairSlave;
boost::thread repl_thread(replSlaveThread);
}
- if ( master || replPair ) {
- if ( master )
+ if ( replSettings.master || replPair ) {
+ if ( replSettings.master )
log(1) << "master=true" << endl;
- master = true;
+ replSettings.master = true;
createOplog();
+ boost::thread t(replMasterThread);
}
}
@@ -1720,6 +1872,7 @@ namespace mongo {
virtual bool slaveOk() {
return false;
}
+ virtual LockType locktype(){ return WRITE; }
CmdLogCollection() : Command( "logCollection" ) {}
virtual void help( stringstream &help ) const {
help << "examples: { logCollection: <collection ns>, start: 1 }, "
diff --git a/db/repl.h b/db/repl.h
index a4c1737..c5e0f63 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -32,6 +32,7 @@
#include "db.h"
#include "dbhelpers.h"
#include "query.h"
+#include "queryoptimizer.h"
#include "../client/dbclient.h"
@@ -46,14 +47,31 @@ namespace mongo {
--slave cmd line setting -> SimpleSlave
*/
typedef enum { NotSlave=0, SimpleSlave, ReplPairSlave } SlaveTypes;
- extern SlaveTypes slave;
- /* true means we are master and doing replication. if we are not writing to oplog (no --master or repl pairing),
- this won't be true.
- */
- extern bool master;
+ class ReplSettings {
+ public:
+ SlaveTypes slave;
+
+ /* true means we are master and doing replication. if we are not writing to oplog (no --master or repl pairing),
+ this won't be true.
+ */
+ bool master;
+
+ int opIdMem;
+
+ bool fastsync;
+
+ bool autoresync;
+
+ int slavedelay;
+
+ ReplSettings()
+ : slave(NotSlave) , master(false) , opIdMem(100000000) , fastsync() , autoresync(false), slavedelay() {
+ }
+
+ };
- extern int opIdMem;
+ extern ReplSettings replSettings;
bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
bool slaveOk, bool useReplAuth, bool snapshot);
@@ -115,6 +133,7 @@ namespace mongo {
// returns false if the slave has been reset
bool updateSetsWithLocalOps( OpTime &localLogTail, bool mayUnlock );
string ns() const { return string( "local.oplog.$" ) + sourceName(); }
+ unsigned _sleepAdviceTime;
public:
static void applyOperation(const BSONObj& op);
@@ -131,11 +150,11 @@ namespace mongo {
OpTime syncedTo;
/* This is for repl pairs.
- _lastSavedLocalTs is the most recent point in the local log that we know is consistent
- with the remote log ( ie say the local op log has entries ABCDE and the remote op log
- has ABCXY, then _lastSavedLocalTs won't be greater than C until we have reconciled
- the DE-XY difference.)
- */
+ _lastSavedLocalTs is the most recent point in the local log that we know is consistent
+ with the remote log ( ie say the local op log has entries ABCDE and the remote op log
+ has ABCXY, then _lastSavedLocalTs won't be greater than C until we have reconciled
+ the DE-XY difference.)
+ */
OpTime _lastSavedLocalTs;
int nClonedThisPass;
@@ -160,7 +179,13 @@ namespace mongo {
operator string() const { return sourceName() + "@" + hostName; }
bool haveMoreDbsToSync() const { return !addDbNextPass.empty(); }
-
+ int sleepAdvice() const {
+ if ( !_sleepAdviceTime )
+ return 0;
+ int wait = _sleepAdviceTime - unsigned( time( 0 ) );
+ return wait > 0 ? wait : 0;
+ }
+
static bool throttledForceResyncDead( const char *requester );
static void forceResyncDead( const char *requester );
void forceResync( const char *requester );
@@ -173,7 +198,6 @@ namespace mongo {
"c" db cmd
"db" declares presence of a database (ns is set to the db name + '.')
*/
- void _logOp(const char *opstr, const char *ns, const char *logNs, const BSONObj& obj, BSONObj *patt, bool *b, const OpTime &ts);
void logOp(const char *opstr, const char *ns, const BSONObj& obj, BSONObj *patt = 0, bool *b = 0);
// class for managing a set of ids in memory
@@ -239,9 +263,9 @@ namespace mongo {
dbIds_( "local.temp.replIds" ),
dbModIds_( "local.temp.replModIds" ),
inMem_( true ),
- maxMem_( opIdMem ) {
+ maxMem_( replSettings.opIdMem ) {
}
- void reset( int maxMem = opIdMem ) {
+ void reset( int maxMem = replSettings.opIdMem ) {
memIds_.reset();
memModIds_.reset();
dbIds_.reset();
@@ -312,4 +336,146 @@ namespace mongo {
int maxMem_;
};
+ bool anyReplEnabled();
+ void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level = 0 );
+
+ void replCheckCloseDatabase( Database * db );
+
+ extern int __findingStartInitialTimeout; // configurable for testing
+
+ class FindingStartCursor {
+ public:
+ FindingStartCursor( const QueryPlan & qp ) :
+ _qp( qp ),
+ _findingStart( true ),
+ _findingStartMode(),
+ _findingStartTimer( 0 ),
+ _findingStartCursor( 0 )
+ { init(); }
+ bool done() const { return !_findingStart; }
+ auto_ptr< Cursor > cRelease() { return _c; }
+ void next() {
+ if ( !_findingStartCursor || !_findingStartCursor->c->ok() ) {
+ _findingStart = false;
+ _c = _qp.newCursor(); // on error, start from beginning
+ destroyClientCursor();
+ return;
+ }
+ switch( _findingStartMode ) {
+ case Initial: {
+ if ( !_matcher->matches( _findingStartCursor->c->currKey(), _findingStartCursor->c->currLoc() ) ) {
+ _findingStart = false; // found first record out of query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->c->currLoc() );
+ destroyClientCursor();
+ return;
+ }
+ _findingStartCursor->c->advance();
+ RARELY {
+ if ( _findingStartTimer.seconds() >= __findingStartInitialTimeout ) {
+ createClientCursor( startLoc( _findingStartCursor->c->currLoc() ) );
+ _findingStartMode = FindExtent;
+ return;
+ }
+ }
+ maybeRelease();
+ return;
+ }
+ case FindExtent: {
+ if ( !_matcher->matches( _findingStartCursor->c->currKey(), _findingStartCursor->c->currLoc() ) ) {
+ _findingStartMode = InExtent;
+ return;
+ }
+ DiskLoc prev = prevLoc( _findingStartCursor->c->currLoc() );
+ if ( prev.isNull() ) { // hit beginning, so start scanning from here
+ createClientCursor();
+ _findingStartMode = InExtent;
+ return;
+ }
+ // There might be a more efficient implementation than creating new cursor & client cursor each time,
+ // not worrying about that for now
+ createClientCursor( prev );
+ maybeRelease();
+ return;
+ }
+ case InExtent: {
+ if ( _matcher->matches( _findingStartCursor->c->currKey(), _findingStartCursor->c->currLoc() ) ) {
+ _findingStart = false; // found first record in query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->c->currLoc() );
+ destroyClientCursor();
+ return;
+ }
+ _findingStartCursor->c->advance();
+ maybeRelease();
+ return;
+ }
+ default: {
+ massert( 12600, "invalid _findingStartMode", false );
+ }
+ }
+ }
+ private:
+ enum FindingStartMode { Initial, FindExtent, InExtent };
+ const QueryPlan &_qp;
+ bool _findingStart;
+ FindingStartMode _findingStartMode;
+ auto_ptr< CoveredIndexMatcher > _matcher;
+ Timer _findingStartTimer;
+ ClientCursor * _findingStartCursor;
+ auto_ptr< Cursor > _c;
+ DiskLoc startLoc( const DiskLoc &rec ) {
+ Extent *e = rec.rec()->myExtent( rec );
+ if ( e->myLoc != _qp.nsd()->capExtent )
+ return e->firstRecord;
+ // Likely we are on the fresh side of capExtent, so return first fresh record.
+ // If we are on the stale side of capExtent, then the collection is small and it
+ // doesn't matter if we start the extent scan with capFirstNewRecord.
+ return _qp.nsd()->capFirstNewRecord;
+ }
+
+ DiskLoc prevLoc( const DiskLoc &rec ) {
+ Extent *e = rec.rec()->myExtent( rec );
+ if ( e->xprev.isNull() )
+ e = _qp.nsd()->lastExtent.ext();
+ else
+ e = e->xprev.ext();
+ if ( e->myLoc != _qp.nsd()->capExtent )
+ return e->firstRecord;
+ return DiskLoc(); // reached beginning of collection
+ }
+ void createClientCursor( const DiskLoc &startLoc = DiskLoc() ) {
+ auto_ptr<Cursor> c = _qp.newCursor( startLoc );
+ _findingStartCursor = new ClientCursor(c, _qp.ns(), false);
+ }
+ void destroyClientCursor() {
+ if ( _findingStartCursor ) {
+ ClientCursor::erase( _findingStartCursor->cursorid );
+ _findingStartCursor = 0;
+ }
+ }
+ void maybeRelease() {
+ RARELY {
+ CursorId id = _findingStartCursor->cursorid;
+ _findingStartCursor->updateLocation();
+ {
+ dbtemprelease t;
+ }
+ _findingStartCursor = ClientCursor::find( id, false );
+ }
+ }
+ void init() {
+ // Use a ClientCursor here so we can release db mutex while scanning
+ // oplog (can take quite a while with large oplogs).
+ auto_ptr<Cursor> c = _qp.newReverseCursor();
+ _findingStartCursor = new ClientCursor(c, _qp.ns(), false);
+ _findingStartTimer.reset();
+ _findingStartMode = Initial;
+ BSONElement tsElt = _qp.query()[ "ts" ];
+ massert( 13044, "no ts field in query", !tsElt.eoo() );
+ BSONObjBuilder b;
+ b.append( tsElt );
+ BSONObj tsQuery = b.obj();
+ _matcher.reset(new CoveredIndexMatcher(tsQuery, _qp.indexKey()));
+ }
+ };
+
} // namespace mongo
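
Roughly, FindingStartCursor locates the start of an oplog scan in three escalating modes: Initial walks a reverse cursor record by record until it leaves the query range; if that exceeds __findingStartInitialTimeout it escalates to FindExtent, which hops backwards a whole extent at a time until it reaches an extent whose first record is outside the range; InExtent then advances within that extent to the first matching record. Presumed usage, based only on the members shown above (not part of this diff):

    // FindingStartCursor fsc( qp );       // qp: a QueryPlan over the oplog
    // while ( !fsc.done() )
    //     fsc.next();                     // Initial -> FindExtent -> InExtent
    // auto_ptr< Cursor > c = fsc.cRelease();   // positioned at first match
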
diff --git a/db/replset.h b/db/replset.h
index 98d80d6..66a8604 100644
--- a/db/replset.h
+++ b/db/replset.h
@@ -49,13 +49,13 @@ namespace mongo {
};
int state;
- string info; // commentary about our current state
+ ThreadSafeString info; // commentary about our current state
string arbHost; // "-" for no arbiter. "host[:port]"
int remotePort;
string remoteHost;
string remote; // host:port if port specified.
// int date; // -1 not yet set; 0=slave; 1=master
-
+
string getInfo() {
stringstream ss;
ss << " state: ";
@@ -111,7 +111,7 @@ namespace mongo {
If 'client' is not specified, the current client is used.
*/
inline bool isMaster( const char *client = 0 ) {
- if( !slave )
+ if( ! replSettings.slave )
return true;
if ( !client ) {
@@ -128,7 +128,7 @@ namespace mongo {
return true;
}
else {
- if( master ) {
+ if( replSettings.master ) {
// if running with --master --slave, allow. note that master is also true
// for repl pairs so the check for replPair above is important.
return true;
diff --git a/db/scanandorder.h b/db/scanandorder.h
index 3f41433..f038069 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -40,7 +40,7 @@ namespace mongo {
// returns the key value for o
BSONObj getKeyFromObject(BSONObj o) {
- return o.extractFields(pattern);
+ return o.extractFields(pattern,true);
}
};
diff --git a/db/security.cpp b/db/security.cpp
index 747b04a..6a01627 100644
--- a/db/security.cpp
+++ b/db/security.cpp
@@ -21,12 +21,44 @@
#include "instance.h"
#include "client.h"
#include "curop.h"
+#include "db.h"
+#include "dbhelpers.h"
namespace mongo {
bool noauth = true;
-
+
int AuthenticationInfo::warned = 0;
+ void AuthenticationInfo::print(){
+ cout << "AuthenticationInfo: " << this << "\n";
+ for ( map<string,Auth>::iterator i=m.begin(); i!=m.end(); i++ ){
+ cout << "\t" << i->first << "\t" << i->second.level << "\n";
+ }
+ cout << "END" << endl;
+ }
+
+
+ bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) {
+ if ( cc().isGod() ){
+ return true;
+ }
+
+ if ( isLocalHost ){
+ atleastreadlock l("");
+ Client::GodScope gs;
+ Client::Context c("admin.system.users");
+ BSONObj result;
+ if( ! Helpers::getSingleton("admin.system.users", result) ){
+ if( warned == 0 ) {
+ warned++;
+ log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+
} // namespace mongo
diff --git a/db/security.h b/db/security.h
index f61d5e1..261b123 100644
--- a/db/security.h
+++ b/db/security.h
@@ -22,9 +22,8 @@
#undef assert
#define assert xassert
-#include "db.h"
-#include "dbhelpers.h"
#include "nonce.h"
+#include "concurrency.h"
namespace mongo {
@@ -38,40 +37,42 @@ namespace mongo {
};
class AuthenticationInfo : boost::noncopyable {
+ mongo::mutex _lock;
map<string, Auth> m; // dbname -> auth
static int warned;
public:
bool isLocalHost;
AuthenticationInfo() { isLocalHost = false; }
- virtual ~AuthenticationInfo() {
+ ~AuthenticationInfo() {
}
- void logout(const char *dbname) {
- assertInWriteLock();
+ void logout(const string& dbname ) {
+ scoped_lock lk(_lock);
m.erase(dbname);
}
- void authorize(const char *dbname) {
- assertInWriteLock();
+ void authorize(const string& dbname ) {
+ scoped_lock lk(_lock);
m[dbname].level = 2;
}
- virtual bool isAuthorized(const char *dbname) {
- if( m[dbname].level == 2 ) return true;
+ void authorizeReadOnly(const string& dbname) {
+ scoped_lock lk(_lock);
+ m[dbname].level = 1;
+ }
+ bool isAuthorized(const string& dbname) { return _isAuthorized( dbname, 2 ); }
+ bool isAuthorizedReads(const string& dbname) { return _isAuthorized( dbname, 1 ); }
+ bool isAuthorizedForLock(const string& dbname, int lockType ) { return _isAuthorized( dbname , lockType > 0 ? 2 : 1 ); }
+
+ void print();
+
+ protected:
+ bool _isAuthorized(const string& dbname, int level) {
+ if( m[dbname].level >= level ) return true;
if( noauth ) return true;
- if( m["admin"].level == 2 ) return true;
- if( m["local"].level == 2 ) return true;
- if( isLocalHost ) {
- readlock l("");
- Client::Context c("admin.system.users");
- BSONObj result;
- if( Helpers::getSingleton("admin.system.users", result) )
- return false;
- if( warned == 0 ) {
- warned++;
- log() << "warning: no users configured in admin.system.users, allowing localhost access" << endl;
- }
- return true;
- }
- return false;
+ if( m["admin"].level >= level ) return true;
+ if( m["local"].level >= level ) return true;
+ return _isAuthorizedSpecialChecks( dbname );
}
+
+ bool _isAuthorizedSpecialChecks( const string& dbname );
};
} // namespace mongo
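
The rewrite replaces the boolean notion of authorization with integer levels: 1 grants reads, 2 grants writes, and isAuthorizedForLock() maps a write lock request to level 2 and anything else to level 1. A minimal standalone model of the check:

    #include <iostream>
    #include <map>
    #include <string>

    static std::map<std::string,int> m;   // dbname -> level

    static bool isAuthorized( const std::string& db, int level ) {
        return m[db] >= level;
    }

    int main() {
        m["test"] = 1;                                       // authorizeReadOnly
        std::cout << isAuthorized( "test", 1 ) << std::endl; // 1: reads allowed
        std::cout << isAuthorized( "test", 2 ) << std::endl; // 0: writes denied
    }
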
diff --git a/db/security_commands.cpp b/db/security_commands.cpp
index 9d63744..326d6e4 100644
--- a/db/security_commands.cpp
+++ b/db/security_commands.cpp
@@ -1,4 +1,20 @@
// security_commands.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// security.cpp links with both dbgrid and db. this file db only -- at least for now.
// security.cpp
@@ -39,6 +55,7 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return NONE; }
CmdGetNonce() : Command("getnonce") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
nonce *n = new nonce(security.getNonce());
@@ -58,12 +75,12 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return NONE; }
CmdLogout() : Command("logout") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
// database->name is the one we are logging out...
- Client& client = cc();
- AuthenticationInfo *ai = client.ai;
- ai->logout(client.database()->name.c_str());
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ ai->logout(nsToDatabase(ns));
return true;
}
} cmdLogout;
@@ -77,6 +94,7 @@ namespace mongo {
virtual bool slaveOk() {
return true;
}
+ virtual LockType locktype(){ return WRITE; } // TODO: make this READ
CmdAuthenticate() : Command("authenticate") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
log(1) << " authenticate: " << cmdObj << endl;
@@ -88,7 +106,7 @@ namespace mongo {
if( user.empty() || key.empty() || received_nonce.empty() ) {
log() << "field missing/wrong type in received authenticate command "
<< cc().database()->name
- << '\n';
+ << endl;
errmsg = "auth fails";
sleepmillis(10);
return false;
@@ -107,7 +125,7 @@ namespace mongo {
}
if ( reject ) {
- log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a security attack. db:" << cc().database()->name << '\n';
+ log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a security attack. db:" << cc().database()->name << endl;
errmsg = "auth fails";
sleepmillis(30);
return false;
@@ -124,7 +142,7 @@ namespace mongo {
b << "user" << user;
BSONObj query = b.done();
if( !Helpers::findOne(systemUsers.c_str(), query, userObj) ) {
- log() << "auth: couldn't find user " << user << ", " << systemUsers << '\n';
+ log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
errmsg = "auth fails";
return false;
}
@@ -146,13 +164,24 @@ namespace mongo {
string computed = digestToString( d );
if ( key != computed ){
- log() << "auth: key mismatch " << user << ", ns:" << ns << '\n';
+ log() << "auth: key mismatch " << user << ", ns:" << ns << endl;
errmsg = "auth fails";
return false;
}
- AuthenticationInfo *ai = currentClient.get()->ai;
- ai->authorize(cc().database()->name.c_str());
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+
+ if ( userObj[ "readOnly" ].isBoolean() && userObj[ "readOnly" ].boolean() ) {
+ if ( readLockSupported() ){
+ ai->authorizeReadOnly( cc().database()->name.c_str() );
+ }
+ else {
+ log() << "warning: old version of boost, read-only users not supported" << endl;
+ ai->authorize( cc().database()->name.c_str() );
+ }
+ } else {
+ ai->authorize( cc().database()->name.c_str() );
+ }
return true;
}
} cmdAuthenticate;
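
For context on the key comparison above: in this (historical) nonce-based scheme, the stored credential and the driver's response are, as best reconstructed from the surrounding code,

    // pwd = md5hex( user + ":mongo:" + cleartext_password )  // in system.users
    // key = md5hex( nonce + user + pwd )                     // sent by driver
    // the server recomputes the second line and compares it against `key`.
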
diff --git a/db/stats/counters.cpp b/db/stats/counters.cpp
new file mode 100644
index 0000000..8e90902
--- /dev/null
+++ b/db/stats/counters.cpp
@@ -0,0 +1,131 @@
+// counters.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "stdafx.h"
+#include "../jsobj.h"
+#include "counters.h"
+
+namespace mongo {
+
+ OpCounters::OpCounters(){
+ int zero = 0;
+
+ BSONObjBuilder b;
+ b.append( "insert" , zero );
+ b.append( "query" , zero );
+ b.append( "update" , zero );
+ b.append( "delete" , zero );
+ b.append( "getmore" , zero );
+ b.append( "command" , zero );
+ _obj = b.obj();
+
+ _insert = (int*)_obj["insert"].value();
+ _query = (int*)_obj["query"].value();
+ _update = (int*)_obj["update"].value();
+ _delete = (int*)_obj["delete"].value();
+ _getmore = (int*)_obj["getmore"].value();
+ _command = (int*)_obj["command"].value();
+ }
+
+ void OpCounters::gotOp( int op , bool isCommand ){
+ switch ( op ){
+ case dbInsert: gotInsert(); break;
+ case dbQuery:
+ if ( isCommand )
+ gotCommand();
+ else
+ gotQuery();
+ break;
+
+ case dbUpdate: gotUpdate(); break;
+ case dbDelete: gotDelete(); break;
+ case dbGetMore: gotGetMore(); break;
+ case dbKillCursors:
+ case opReply:
+ case dbMsg:
+ break;
+ default: log() << "OpCounters::gotOp unknown op: " << op << endl;
+ }
+ }
+
+ IndexCounters::IndexCounters(){
+ _memSupported = _pi.blockCheckSupported();
+
+ _btreeMemHits = 0;
+ _btreeMemMisses = 0;
+ _btreeAccesses = 0;
+
+
+ _maxAllowed = ( numeric_limits< long long >::max() ) / 2;
+ _resets = 0;
+
+ _sampling = 0;
+ _samplingrate = 100;
+ }
+
+ void IndexCounters::append( BSONObjBuilder& b ){
+ if ( ! _memSupported ){
+ b.append( "note" , "not supported on this platform" );
+ return;
+ }
+
+ BSONObjBuilder bb( b.subobjStart( "btree" ) );
+ bb.appendNumber( "accesses" , _btreeAccesses );
+ bb.appendNumber( "hits" , _btreeMemHits );
+ bb.appendNumber( "misses" , _btreeMemMisses );
+
+ bb.append( "resets" , _resets );
+
+ bb.append( "missRatio" , (_btreeAccesses ? (_btreeMemMisses / (double)_btreeAccesses) : 0) );
+
+ bb.done();
+
+ if ( _btreeAccesses > _maxAllowed ){
+ _btreeAccesses = 0;
+ _btreeMemMisses = 0;
+ _btreeMemHits = 0;
+ _resets++;
+ }
+ }
+
+ FlushCounters::FlushCounters()
+ : _total_time(0)
+ , _flushes(0)
+ , _last()
+ {}
+
+ void FlushCounters::flushed(int ms){
+ _flushes++;
+ _total_time += ms;
+ _last_time = ms;
+ _last = jsTime();
+ }
+
+ void FlushCounters::append( BSONObjBuilder& b ){
+ b.appendNumber( "flushes" , _flushes );
+ b.appendNumber( "total_ms" , _total_time );
+ b.appendNumber( "average_ms" , (_flushes ? (_total_time / double(_flushes)) : 0.0) );
+ b.appendNumber( "last_ms" , _last_time );
+ b.append("last_finished", _last);
+ }
+
+
+ OpCounters globalOpCounters;
+ IndexCounters globalIndexCounters;
+ FlushCounters globalFlushCounters;
+}
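
OpCounters pre-builds a BSONObj of six zeroed ints, then keeps raw pointers into its value bytes, so each gotX() is a single in-place increment and getObj() always serializes current totals with no copying (deliberately not thread safe, as the header notes). A standalone sketch of the idea:

    #include <iostream>

    int main() {
        int storage = 0;                 // in the real code this int lives
                                         // inside the BSONObj's buffer
        int * insert = &storage;         // _insert = (int*)_obj["insert"].value()
        ++*insert;                       // gotInsert()
        ++*insert;
        std::cout << *insert << std::endl;   // 2; the object now reads {insert:2}
    }
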
diff --git a/db/stats/counters.h b/db/stats/counters.h
new file mode 100644
index 0000000..41c2cd2
--- /dev/null
+++ b/db/stats/counters.h
@@ -0,0 +1,121 @@
+// counters.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "../../stdafx.h"
+#include "../jsobj.h"
+#include "../../util/message.h"
+#include "../../util/processinfo.h"
+
+namespace mongo {
+
+ /**
+ * for storing operation counters
+ * note: not thread safe. ok with that for speed
+ */
+ class OpCounters {
+ public:
+
+ OpCounters();
+
+ int * getInsert(){ return _insert; }
+ int * getQuery(){ return _query; }
+ int * getUpdate(){ return _update; }
+ int * getDelete(){ return _delete; }
+ int * getGetMore(){ return _getmore; }
+ int * getCommand(){ return _command; }
+
+ void gotInsert(){ _insert[0]++; }
+ void gotQuery(){ _query[0]++; }
+ void gotUpdate(){ _update[0]++; }
+ void gotDelete(){ _delete[0]++; }
+ void gotGetMore(){ _getmore[0]++; }
+ void gotCommand(){ _command[0]++; }
+
+ void gotOp( int op , bool isCommand );
+
+ BSONObj& getObj(){ return _obj; }
+ private:
+ BSONObj _obj;
+ int * _insert;
+ int * _query;
+ int * _update;
+ int * _delete;
+ int * _getmore;
+ int * _command;
+ };
+
+ extern OpCounters globalOpCounters;
+
+ class IndexCounters {
+ public:
+ IndexCounters();
+
+ void btree( char * node ){
+ if ( ! _memSupported )
+ return;
+ if ( _sampling++ % _samplingrate )
+ return;
+ btree( _pi.blockInMemory( node ) );
+ }
+
+ void btree( bool memHit ){
+ if ( memHit )
+ _btreeMemHits++;
+ else
+ _btreeMemMisses++;
+ _btreeAccesses++;
+ }
+ void btreeHit(){ _btreeMemHits++; _btreeAccesses++; }
+ void btreeMiss(){ _btreeMemMisses++; _btreeAccesses++; }
+
+ void append( BSONObjBuilder& b );
+
+ private:
+ ProcessInfo _pi;
+ bool _memSupported;
+
+ int _sampling;
+ int _samplingrate;
+
+ int _resets;
+ long long _maxAllowed;
+
+ long long _btreeMemMisses;
+ long long _btreeMemHits;
+ long long _btreeAccesses;
+ };
+
+ extern IndexCounters globalIndexCounters;
+
+ class FlushCounters {
+ public:
+ FlushCounters();
+
+ void flushed(int ms);
+
+ void append( BSONObjBuilder& b );
+
+ private:
+ long long _total_time;
+ long long _flushes;
+ int _last_time;
+ Date_t _last;
+ };
+
+ extern FlushCounters globalFlushCounters;
+}
diff --git a/db/stats/snapshots.cpp b/db/stats/snapshots.cpp
new file mode 100644
index 0000000..71ddd72
--- /dev/null
+++ b/db/stats/snapshots.cpp
@@ -0,0 +1,144 @@
+// snapshots.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "stdafx.h"
+#include "snapshots.h"
+#include "../client.h"
+#include "../clientcursor.h"
+
+/**
+ handles snapshotting performance metrics and other such things
+ */
+namespace mongo {
+ void SnapshotData::takeSnapshot(){
+ _created = curTimeMicros64();
+ _globalUsage = Top::global.getGlobalData();
+ _totalWriteLockedTime = dbMutex.info().getTimeLocked();
+ Top::global.cloneMap(_usage);
+ }
+
+ SnapshotDelta::SnapshotDelta( const SnapshotData& older , const SnapshotData& newer )
+ : _older( older ) , _newer( newer )
+ {
+ assert( _newer._created > _older._created );
+ _elapsed = _newer._created - _older._created;
+
+ }
+
+ Top::CollectionData SnapshotDelta::globalUsageDiff(){
+ return Top::CollectionData( _older._globalUsage , _newer._globalUsage );
+ }
+ Top::UsageMap SnapshotDelta::collectionUsageDiff(){
+ Top::UsageMap u;
+
+ for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ){
+ Top::UsageMap::const_iterator j = _older._usage.find(i->first);
+ if (j != _older._usage.end())
+ u[i->first] = Top::CollectionData( j->second , i->second );
+ }
+ return u;
+ }
+
+ Snapshots::Snapshots(int n)
+ : _n(n)
+ , _snapshots(new SnapshotData[n])
+ , _loc(0)
+ , _stored(0)
+ {}
+
+ const SnapshotData* Snapshots::takeSnapshot(){
+ scoped_lock lk(_lock);
+ _loc = ( _loc + 1 ) % _n;
+ _snapshots[_loc].takeSnapshot();
+ if ( _stored < _n )
+ _stored++;
+ return &_snapshots[_loc];
+ }
+
+ auto_ptr<SnapshotDelta> Snapshots::computeDelta( int numBack ){
+ scoped_lock lk(_lock);
+ auto_ptr<SnapshotDelta> p;
+ if ( numBack < numDeltas() )
+ p.reset( new SnapshotDelta( getPrev(numBack+1) , getPrev(numBack) ) );
+ return p;
+ }
+
+ const SnapshotData& Snapshots::getPrev( int numBack ){
+ int x = _loc - numBack;
+ if ( x < 0 )
+ x += _n;
+ return _snapshots[x];
+ }
+
+ void Snapshots::outputLockInfoHTML( stringstream& ss ){
+ scoped_lock lk(_lock);
+ ss << "\n<table>";
+ ss << "<tr><th>elapsed(ms)</th><th>% write locked</th></tr>\n";
+
+ for ( int i=0; i<numDeltas(); i++ ){
+ SnapshotDelta d( getPrev(i+1) , getPrev(i) );
+ ss << "<tr>"
+ << "<td>" << ( d.elapsed() / 1000 ) << "</td>"
+ << "<td>" << (unsigned)(100*d.percentWriteLocked()) << "%</td>"
+ << "</tr>"
+ ;
+ }
+
+ ss << "</table>\n";
+ }
+
+ void SnapshotThread::run(){
+ Client::initThread("snapshotthread");
+ Client& client = cc();
+
+ long long numLoops = 0;
+
+ const SnapshotData* prev = 0;
+
+ while ( ! inShutdown() ){
+ try {
+ const SnapshotData* s = statsSnapshots.takeSnapshot();
+
+ if ( prev ){
+ unsigned long long elapsed = s->_created - prev->_created;
+
+ if ( cmdLine.cpu ){
+ SnapshotDelta d( *prev , *s );
+ log() << "cpu: elapsed:" << (elapsed/1000) <<" writelock: " << (int)(100*d.percentWriteLocked()) << "%" << endl;
+ }
+
+ // TODO: this should really be somewhere else, like in a special ClientCursor thread
+ ClientCursor::idleTimeReport( (unsigned)(elapsed/1000) );
+ }
+
+ prev = s;
+ }
+ catch ( std::exception& e ){
+ log() << "ERROR in SnapshotThread: " << e.what() << endl;
+ }
+
+ numLoops++;
+ sleepsecs(4);
+ }
+
+ client.shutdown();
+ }
+
+ Snapshots statsSnapshots;
+ SnapshotThread snapshotThread;
+}
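
SnapshotDelta::percentWriteLocked() is simply the growth in dbMutex's write-locked time divided by the wall-clock gap between the two snapshots, both in microseconds. A worked example (numbers made up):

    #include <iostream>

    int main() {
        unsigned long long elapsed = 4000000;     // 4s between snapshots
        unsigned long long older = 10000000, newer = 11000000;
        double pct = double( newer - older ) / double( elapsed );
        std::cout << (unsigned)( 100 * pct ) << "%" << std::endl;   // 25%
    }
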
diff --git a/db/stats/snapshots.h b/db/stats/snapshots.h
new file mode 100644
index 0000000..542318a
--- /dev/null
+++ b/db/stats/snapshots.h
@@ -0,0 +1,113 @@
+// snapshots.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+#include "../../stdafx.h"
+#include "../jsobj.h"
+#include "top.h"
+#include "../../util/background.h"
+
+/**
+ handles snapshotting performance metrics and other such things
+ */
+namespace mongo {
+
+ class SnapshotThread;
+
+ /**
+ * stores a point in time snapshot
+ * i.e. all counters at a given time
+ */
+ class SnapshotData {
+ void takeSnapshot();
+
+ unsigned long long _created;
+ Top::CollectionData _globalUsage;
+ unsigned long long _totalWriteLockedTime; // micros of total time locked
+ Top::UsageMap _usage;
+
+ friend class SnapshotThread;
+ friend class SnapshotDelta;
+ friend class Snapshots;
+ };
+
+ /**
+ * contains performance information for a time period
+ */
+ class SnapshotDelta {
+ public:
+ SnapshotDelta( const SnapshotData& older , const SnapshotData& newer );
+
+ unsigned long long start() const {
+ return _older._created;
+ }
+
+ unsigned long long elapsed() const {
+ return _elapsed;
+ }
+
+ unsigned long long timeInWriteLock() const {
+ return _newer._totalWriteLockedTime - _older._totalWriteLockedTime;
+ }
+ double percentWriteLocked() const {
+ double e = (double) elapsed();
+ double w = (double) timeInWriteLock();
+ return w/e;
+ }
+
+ Top::CollectionData globalUsageDiff();
+ Top::UsageMap collectionUsageDiff();
+
+ private:
+ const SnapshotData& _older;
+ const SnapshotData& _newer;
+
+ unsigned long long _elapsed;
+ };
+
+ class Snapshots {
+ public:
+ Snapshots(int n=100);
+
+ const SnapshotData* takeSnapshot();
+
+ int numDeltas() const { return _stored-1; }
+
+ const SnapshotData& getPrev( int numBack = 0 );
+ auto_ptr<SnapshotDelta> computeDelta( int numBack = 0 );
+
+
+ void outputLockInfoHTML( stringstream& ss );
+ private:
+ mongo::mutex _lock;
+ int _n;
+ boost::scoped_array<SnapshotData> _snapshots;
+ int _loc;
+ int _stored;
+ };
+
+ class SnapshotThread : public BackgroundJob {
+ public:
+ void run();
+ };
+
+ extern Snapshots statsSnapshots;
+ extern SnapshotThread snapshotThread;
+
+
+}
diff --git a/db/stats/top.cpp b/db/stats/top.cpp
new file mode 100644
index 0000000..0f27943
--- /dev/null
+++ b/db/stats/top.cpp
@@ -0,0 +1,181 @@
+// top.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "stdafx.h"
+#include "top.h"
+#include "../../util/message.h"
+#include "../commands.h"
+
+namespace mongo {
+
+ Top::UsageData::UsageData( const UsageData& older , const UsageData& newer )
+ : time(newer.time-older.time) ,
+ count(newer.count-older.count)
+ {
+
+ }
+
+ Top::CollectionData::CollectionData( const CollectionData& older , const CollectionData& newer )
+ : total( older.total , newer.total ) ,
+ readLock( older.readLock , newer.readLock ) ,
+ writeLock( older.writeLock , newer.writeLock ) ,
+ queries( older.queries , newer.queries ) ,
+ getmore( older.getmore , newer.getmore ) ,
+ insert( older.insert , newer.insert ) ,
+ update( older.update , newer.update ) ,
+ remove( older.remove , newer.remove ),
+ commands( older.commands , newer.commands )
+ {
+
+ }
+
+
+ void Top::record( const string& ns , int op , int lockType , long long micros , bool command ){
+ //cout << "record: " << ns << "\t" << op << "\t" << command << endl;
+ scoped_lock lk(_lock);
+
+ if ( ( command || op == dbQuery ) && ns == _lastDropped ){
+ _lastDropped = "";
+ return;
+ }
+
+ CollectionData& coll = _usage[ns];
+ _record( coll , op , lockType , micros , command );
+ _record( _global , op , lockType , micros , command );
+ }
+
+ void Top::collectionDropped( const string& ns ){
+ //cout << "collectionDropped: " << ns << endl;
+ scoped_lock lk(_lock);
+ _usage.erase(ns);
+ _lastDropped = ns;
+ }
+
+ void Top::_record( CollectionData& c , int op , int lockType , long long micros , bool command ){
+ c.total.inc( micros );
+
+ if ( lockType > 0 )
+ c.writeLock.inc( micros );
+ else if ( lockType < 0 )
+ c.readLock.inc( micros );
+
+ switch ( op ){
+ case 0:
+ // use 0 for unknown, non-specific
+ break;
+ case dbUpdate:
+ c.update.inc( micros );
+ break;
+ case dbInsert:
+ c.insert.inc( micros );
+ break;
+ case dbQuery:
+ if ( command )
+ c.commands.inc( micros );
+ else
+ c.queries.inc( micros );
+ break;
+ case dbGetMore:
+ c.getmore.inc( micros );
+ break;
+ case dbDelete:
+ c.remove.inc( micros );
+ break;
+ case opReply:
+ case dbMsg:
+ case dbKillCursors:
+ log() << "unexpected op in Top::record: " << op << endl;
+ break;
+ default:
+ log() << "unknown op in Top::record: " << op << endl;
+ }
+
+ }
+
+ void Top::cloneMap(Top::UsageMap& out){
+ scoped_lock lk(_lock);
+ out = _usage;
+ }
+
+ void Top::append( BSONObjBuilder& b ){
+ scoped_lock lk( _lock );
+ append( b , _usage );
+ }
+
+ void Top::append( BSONObjBuilder& b , const char * name , const UsageData& map ){
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ bb.appendNumber( "time" , map.time );
+ bb.appendNumber( "count" , map.count );
+ bb.done();
+ }
+
+ void Top::append( BSONObjBuilder& b , const UsageMap& map ){
+ for ( UsageMap::const_iterator i=map.begin(); i!=map.end(); i++ ){
+ BSONObjBuilder bb( b.subobjStart( i->first.c_str() ) );
+
+ const CollectionData& coll = i->second;
+
+ append( b , "total" , coll.total );
+
+ append( b , "readLock" , coll.readLock );
+ append( b , "writeLock" , coll.writeLock );
+
+ append( b , "queries" , coll.queries );
+ append( b , "getmore" , coll.getmore );
+ append( b , "insert" , coll.insert );
+ append( b , "update" , coll.update );
+ append( b , "remove" , coll.remove );
+ append( b , "commands" , coll.commands );
+
+ bb.done();
+ }
+ }
+
+ class TopCmd : public Command {
+ public:
+ TopCmd() : Command( "top" ){}
+
+ virtual bool slaveOk(){ return true; }
+ virtual bool adminOnly(){ return true; }
+ virtual LockType locktype(){ return READ; }
+ virtual void help( stringstream& help ) const { help << "usage by collection"; }
+
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ {
+ BSONObjBuilder b( result.subobjStart( "totals" ) );
+ Top::global.append( b );
+ b.done();
+ }
+ return true;
+ }
+
+ } topCmd;
+
+ Top Top::global;
+
+ TopOld::T TopOld::_snapshotStart = TopOld::currentTime();
+ TopOld::D TopOld::_snapshotDuration;
+ TopOld::UsageMap TopOld::_totalUsage;
+ TopOld::UsageMap TopOld::_snapshotA;
+ TopOld::UsageMap TopOld::_snapshotB;
+ TopOld::UsageMap &TopOld::_snapshot = TopOld::_snapshotA;
+ TopOld::UsageMap &TopOld::_nextSnapshot = TopOld::_snapshotB;
+ mongo::mutex TopOld::topMutex;
+
+
+}
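
The top command assembles nested per-collection subobjects via the append() overloads above. An illustrative output shape (collection names and numbers made up):

    // { totals: {
    //     "test.foo": {
    //         total:     { time: 5124, count: 12 },
    //         readLock:  { time: 1200, count: 8 },
    //         writeLock: { time: 3924, count: 4 },
    //         queries:   { time: 1100, count: 7 },
    //         ...
    //     } } }
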
diff --git a/util/top.h b/db/stats/top.h
index aaf7c3f..8dab3b0 100644
--- a/util/top.h
+++ b/db/stats/top.h
@@ -23,15 +23,80 @@
namespace mongo {
+ /**
+ * tracks usage by collection
+ */
+ class Top {
+
+ public:
+ class UsageData {
+ public:
+ UsageData() : time(0) , count(0){}
+ UsageData( const UsageData& older , const UsageData& newer );
+ long long time;
+ long long count;
+
+ void inc( long long micros ){
+ count++;
+ time += micros;
+ }
+ };
+
+ class CollectionData {
+ public:
+ CollectionData(){}
+ /**
+ * constructs a diff
+ */
+ CollectionData( const CollectionData& older , const CollectionData& newer );
+
+ UsageData total;
+
+ UsageData readLock;
+ UsageData writeLock;
+
+ UsageData queries;
+ UsageData getmore;
+ UsageData insert;
+ UsageData update;
+ UsageData remove;
+ UsageData commands;
+ };
+
+ typedef map<string,CollectionData> UsageMap;
+
+ public:
+ void record( const string& ns , int op , int lockType , long long micros , bool command );
+ void append( BSONObjBuilder& b );
+ void cloneMap(UsageMap& out);
+ CollectionData getGlobalData(){ return _global; }
+ void collectionDropped( const string& ns );
+
+ public: // static stuff
+ static Top global;
+
+ void append( BSONObjBuilder& b , const char * name , const UsageData& map );
+ void append( BSONObjBuilder& b , const UsageMap& map );
+
+ private:
+
+ void _record( CollectionData& c , int op , int lockType , long long micros , bool command );
+
+ mongo::mutex _lock;
+ CollectionData _global;
+ UsageMap _usage;
+ string _lastDropped;
+ };
+
/* Records per namespace utilization of the mongod process.
No two functions of this class may be called concurrently.
*/
- class Top {
+ class TopOld {
typedef boost::posix_time::ptime T;
typedef boost::posix_time::time_duration D;
typedef boost::tuple< D, int, int, int > UsageData;
public:
- Top() : _read(false), _write(false) { }
+ TopOld() : _read(false), _write(false) { }
/* these are used to record activity: */
@@ -52,7 +117,7 @@ namespace mongo {
D d = currentTime() - _currentStart;
{
- boostlock L(topMutex);
+ scoped_lock L(topMutex);
recordUsage( _current, d );
}
@@ -71,7 +136,7 @@ namespace mongo {
};
static void usage( vector< Usage > &res ) {
- boostlock L(topMutex);
+ scoped_lock L(topMutex);
// Populate parent namespaces
UsageMap snapshot;
@@ -109,7 +174,7 @@ namespace mongo {
}
static void completeSnapshot() {
- boostlock L(topMutex);
+ scoped_lock L(topMutex);
if ( &_snapshot == &_snapshotA ) {
_snapshot = _snapshotB;
@@ -124,7 +189,7 @@ namespace mongo {
}
private:
- static boost::mutex topMutex;
+ static mongo::mutex topMutex;
static bool trivialNs( const char *ns ) {
const char *ret = strrchr( ns, '.' );
return ret && ret[ 1 ] == '\0';
diff --git a/db/storage.cpp b/db/storage.cpp
index 4da2d82..7ddfc65 100644
--- a/db/storage.cpp
+++ b/db/storage.cpp
@@ -1,4 +1,20 @@
// storage.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#include "stdafx.h"
#include "pdfile.h"
diff --git a/db/update.cpp b/db/update.cpp
index 0639a99..d6a5c5e 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -26,8 +26,11 @@
namespace mongo {
+ //#define DEBUGUPDATE(x) cout << x << endl;
+#define DEBUGUPDATE(x)
+
const char* Mod::modNames[] = { "$inc", "$set", "$push", "$pushAll", "$pull", "$pullAll" , "$pop", "$unset" ,
- "$bitand" , "$bitor" , "$bit" };
+ "$bitand" , "$bitor" , "$bit" , "$addToSet" };
unsigned Mod::modNamesNum = sizeof(Mod::modNames)/sizeof(char*);
bool Mod::_pullElementMatch( BSONElement& toMatch ) const {
@@ -46,13 +49,42 @@ namespace mongo {
return matcher->matches( toMatch.embeddedObject() );
}
- void Mod::apply( BSONObjBuilder& b , BSONElement in ){
+ template< class Builder >
+ void Mod::appendIncremented( Builder& bb , const BSONElement& in, ModState& ms ) const {
+ BSONType a = in.type();
+ BSONType b = elt.type();
+
+ if ( a == NumberDouble || b == NumberDouble ){
+ ms.incType = NumberDouble;
+ ms.incdouble = elt.numberDouble() + in.numberDouble();
+ }
+ else if ( a == NumberLong || b == NumberLong ){
+ ms.incType = NumberLong;
+ ms.inclong = elt.numberLong() + in.numberLong();
+ }
+ else {
+ ms.incType = NumberInt;
+ ms.incint = elt.numberInt() + in.numberInt();
+ }
+
+ ms.appendIncValue( bb );
+ }
+
+ template< class Builder >
+ void appendUnset( Builder &b ) {
+ }
+
+ template<>
+ void appendUnset( BSONArrayBuilder &b ) {
+ b.appendNull();
+ }
+
+ template< class Builder >
+ void Mod::apply( Builder& b , BSONElement in , ModState& ms ) const {
switch ( op ){
case INC: {
- // TODO: this is horrible
- inc( in );
- b.appendAs( elt , shortFieldName );
+ appendIncremented( b , in , ms );
break;
}
@@ -63,10 +95,10 @@ namespace mongo {
}
case UNSET: {
- //Explicit NOOP
+ appendUnset( b );
break;
}
-
+
case PUSH: {
uassert( 10131 , "$push can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
@@ -77,13 +109,60 @@ namespace mongo {
n++;
}
- pushStartSize = n;
+ ms.pushStartSize = n;
bb.appendAs( elt , bb.numStr( n ) );
bb.done();
break;
}
+ case ADDTOSET: {
+ uassert( 12592 , "$addToSet can only be applied to an array" , in.type() == Array );
+ BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
+
+ BSONObjIterator i( in.embeddedObject() );
+ int n=0;
+
+ if ( isEach() ){
+
+ BSONElementSet toadd;
+ parseEach( toadd );
+
+ while ( i.more() ){
+ BSONElement cur = i.next();
+ bb.append( cur );
+ n++;
+ toadd.erase( cur );
+ }
+
+ for ( BSONElementSet::iterator j=toadd.begin(); j!=toadd.end(); j++ ){
+ bb.appendAs( *j , BSONObjBuilder::numStr( n++ ) );
+ }
+
+ }
+ else {
+
+ bool found = false;
+
+ while ( i.more() ){
+ BSONElement cur = i.next();
+ bb.append( cur );
+ n++;
+ if ( elt.woCompare( cur , false ) == 0 )
+ found = true;
+ }
+
+ if ( ! found )
+ bb.appendAs( elt , bb.numStr( n ) );
+
+ }
+
+ bb.done();
+ break;
+ }
+
+
+
case PUSH_ALL: {
uassert( 10132 , "$pushAll can only be applied to an array" , in.type() == Array );
uassert( 10133 , "$pushAll has to be passed an array" , elt.type() );
@@ -97,7 +176,7 @@ namespace mongo {
n++;
}
- pushStartSize = n;
+ ms.pushStartSize = n;
i = BSONObjIterator( elt.embeddedObject() );
while ( i.more() ){
@@ -172,8 +251,8 @@ namespace mongo {
}
}
- pushStartSize = n;
- assert( pushStartSize == in.embeddedObject().nFields() );
+ ms.pushStartSize = n;
+ assert( ms.pushStartSize == in.embeddedObject().nFields() );
bb.done();
break;
}
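
For context, an $addToSet update with the new $each form can be assembled
with the builders this codebase already uses in its tests; a hedged sketch
(the field name and values are illustrative):

    // builds { $addToSet : { tags : { $each : [ "a" , "b" ] } } }
    BSONArrayBuilder vals;
    vals.append( "a" );
    vals.append( "b" );
    BSONObj updateobj = BSON( "$addToSet" << BSON( "tags" << BSON( "$each" << vals.arr() ) ) );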
@@ -226,97 +305,130 @@ namespace mongo {
}
}
- bool ModSet::canApplyInPlaceAndVerify(const BSONObj &obj) const {
- bool inPlacePossible = true;
+ auto_ptr<ModSetState> ModSet::prepare(const BSONObj &obj) const {
+ ModSetState * mss = new ModSetState( obj );
// Perform this check first, so that we don't leave a partially modified object on uassert.
for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); ++i ) {
+ ModState& ms = mss->_mods[i->first];
+
const Mod& m = i->second;
BSONElement e = obj.getFieldDotted(m.fieldName);
-
+
+ ms.m = &m;
+ ms.old = e;
+
if ( e.eoo() ) {
- inPlacePossible = (m.op == Mod::UNSET);
+ mss->amIInPlacePossible( m.op == Mod::UNSET );
+ continue;
}
- else {
- switch( m.op ) {
- case Mod::INC:
- uassert( 10140 , "Cannot apply $inc modifier to non-number", e.isNumber() || e.eoo() );
- if ( !e.isNumber() )
- inPlacePossible = false;
- break;
- case Mod::SET:
- inPlacePossible =
- m.elt.type() == e.type() &&
- m.elt.valuesize() == e.valuesize();
- break;
- case Mod::PUSH:
- case Mod::PUSH_ALL:
- uassert( 10141 , "Cannot apply $push/$pushAll modifier to non-array", e.type() == Array || e.eoo() );
- inPlacePossible = false;
- break;
- case Mod::PULL:
- case Mod::PULL_ALL: {
- uassert( 10142 , "Cannot apply $pull/$pullAll modifier to non-array", e.type() == Array || e.eoo() );
- BSONObjIterator i( e.embeddedObject() );
- while( inPlacePossible && i.more() ) {
- BSONElement arrI = i.next();
- if ( m.op == Mod::PULL ) {
- if ( m._pullElementMatch( arrI ) )
- inPlacePossible = false;
- }
- else if ( m.op == Mod::PULL_ALL ) {
- BSONObjIterator j( m.elt.embeddedObject() );
- while( inPlacePossible && j.moreWithEOO() ) {
- BSONElement arrJ = j.next();
- if ( arrJ.eoo() )
- break;
- if ( arrI.woCompare( arrJ, false ) == 0 ) {
- inPlacePossible = false;
- }
- }
+
+ switch( m.op ) {
+ case Mod::INC:
+ uassert( 10140 , "Cannot apply $inc modifier to non-number", e.isNumber() || e.eoo() );
+ if ( mss->amIInPlacePossible( e.isNumber() ) ){
+ // check more typing info here
+ if ( m.elt.type() != e.type() ){
+ // if I'm incrementing with a double, then the storage has to be a double
+ mss->amIInPlacePossible( m.elt.type() != NumberDouble );
+ }
+ }
+ break;
+
+ case Mod::SET:
+ mss->amIInPlacePossible( m.elt.type() == e.type() &&
+ m.elt.valuesize() == e.valuesize() );
+ break;
+
+ case Mod::PUSH:
+ case Mod::PUSH_ALL:
+ uassert( 10141 , "Cannot apply $push/$pushAll modifier to non-array", e.type() == Array || e.eoo() );
+ mss->amIInPlacePossible( false );
+ break;
+
+ case Mod::PULL:
+ case Mod::PULL_ALL: {
+ uassert( 10142 , "Cannot apply $pull/$pullAll modifier to non-array", e.type() == Array || e.eoo() );
+ BSONObjIterator i( e.embeddedObject() );
+ while( mss->_inPlacePossible && i.more() ) {
+ BSONElement arrI = i.next();
+ if ( m.op == Mod::PULL ) {
+ mss->amIInPlacePossible( ! m._pullElementMatch( arrI ) );
+ }
+ else if ( m.op == Mod::PULL_ALL ) {
+ BSONObjIterator j( m.elt.embeddedObject() );
+ while( mss->_inPlacePossible && j.moreWithEOO() ) {
+ BSONElement arrJ = j.next();
+ if ( arrJ.eoo() )
+ break;
+ mss->amIInPlacePossible( arrI.woCompare( arrJ, false ) );
}
}
- break;
}
- case Mod::POP: {
- uassert( 10143 , "Cannot apply $pop modifier to non-array", e.type() == Array || e.eoo() );
- if ( ! e.embeddedObject().isEmpty() )
- inPlacePossible = false;
- break;
+ break;
+ }
+
+ case Mod::POP: {
+ uassert( 10143 , "Cannot apply $pop modifier to non-array", e.type() == Array || e.eoo() );
+ mss->amIInPlacePossible( e.embeddedObject().isEmpty() );
+ break;
+ }
+
+ case Mod::ADDTOSET: {
+ uassert( 12591 , "Cannot apply $addToSet modifier to non-array", e.type() == Array || e.eoo() );
+
+ BSONObjIterator i( e.embeddedObject() );
+ if ( m.isEach() ){
+ BSONElementSet toadd;
+ m.parseEach( toadd );
+ while( i.more() ) {
+ BSONElement arrI = i.next();
+ toadd.erase( arrI );
+ }
+ mss->amIInPlacePossible( toadd.size() == 0 );
}
- default:
- // mods we don't know about shouldn't be done in place
- inPlacePossible = false;
+ else {
+ bool found = false;
+ while( i.more() ) {
+ BSONElement arrI = i.next();
+ if ( arrI.woCompare( m.elt , false ) == 0 ){
+ found = true;
+ break;
+ }
+ }
+ mss->amIInPlacePossible( found );
}
+ break;
+ }
+
+ default:
+ // mods we don't know about shouldn't be done in place
+ mss->amIInPlacePossible( false );
}
}
- return inPlacePossible;
+ return auto_ptr<ModSetState>( mss );
}
- void ModSet::applyModsInPlace(const BSONObj &obj) const {
- for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); ++i ) {
- const Mod& m = i->second;
- BSONElement e = obj.getFieldDotted(m.fieldName);
+ void ModSetState::applyModsInPlace() {
+ for ( ModStateHolder::iterator i = _mods.begin(); i != _mods.end(); ++i ) {
+ ModState& m = i->second;
- switch ( m.op ){
+ switch ( m.m->op ){
case Mod::UNSET:
case Mod::PULL:
case Mod::PULL_ALL:
+ case Mod::ADDTOSET:
+ // this should have been handled by prepare
break;
// [dm] the BSONElementManipulator statements below are for replication (correct?)
case Mod::INC:
- m.inc(e);
- m.setElementToOurNumericValue(e);
+ m.m->incrementMe( m.old );
+ m.fixedName = "$set";
+ m.fixed = &(m.old);
break;
case Mod::SET:
- if ( e.isNumber() && m.elt.isNumber() ) {
- // todo: handle NumberLong:
- m.setElementToOurNumericValue(e);
- }
- else {
- BSONElementManipulator( e ).replaceTypeAndValue( m.elt );
- }
+ BSONElementManipulator( m.old ).replaceTypeAndValue( m.m->elt );
break;
default:
uassert( 10144 , "can't apply mod in place - shouldn't have gotten here" , 0 );
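
The shape of the refactor is easiest to see from the caller's side: ModSet is
now const and reusable, and all per-object bookkeeping lives in the
ModSetState returned by prepare(). The calling pattern, as it appears later
in this patch:

    auto_ptr<ModSetState> mss = mods->prepare( obj ); // per-object state
    if ( mss->canApplyInPlace() )
        mss->applyModsInPlace();           // mutate the object's buffer directly
    else
        newObj = mss->createNewFromMods(); // rebuild the object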
@@ -342,18 +454,19 @@ namespace mongo {
fields[ base + top.fieldName() ] = top;
}
- void ModSet::_appendNewFromMods( const string& root , Mod& m , BSONObjBuilder& b , set<string>& onedownseen ){
- const char * temp = m.fieldName;
+ template< class Builder >
+ void ModSetState::_appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen ){
+ const char * temp = m.fieldName();
temp += root.size();
const char * dot = strchr( temp , '.' );
if ( dot ){
- string nr( m.fieldName , 0 , 1 + ( dot - m.fieldName ) );
+ string nr( m.fieldName() , 0 , 1 + ( dot - m.fieldName() ) );
string nf( temp , 0 , dot - temp );
if ( onedownseen.count( nf ) )
return;
onedownseen.insert( nf );
BSONObjBuilder bb ( b.subobjStart( nf.c_str() ) );
- createNewFromMods( nr , bb , BSONObj() );
+ createNewFromMods( nr , bb , BSONObj() ); // don't infer an array from name
bb.done();
}
else {
@@ -362,29 +475,37 @@ namespace mongo {
}
- void ModSet::createNewFromMods( const string& root , BSONObjBuilder& b , const BSONObj &obj ){
+ template< class Builder >
+ void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ){
BSONObjIteratorSorted es( obj );
BSONElement e = es.next();
- ModHolder::iterator m = _mods.lower_bound( root );
- ModHolder::iterator mend = _mods.lower_bound( root + "{" );
+ ModStateHolder::iterator m = _mods.lower_bound( root );
+ ModStateHolder::iterator mend = _mods.lower_bound( root + '{' );
set<string> onedownseen;
while ( e.type() && m != mend ){
string field = root + e.fieldName();
- FieldCompareResult cmp = compareDottedFieldNames( m->second.fieldName , field );
-
+ FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
+
switch ( cmp ){
case LEFT_SUBFIELD: { // Mod is embedded under this element
uassert( 10145 , "LEFT_SUBFIELD only supports Object" , e.type() == Object || e.type() == Array );
if ( onedownseen.count( e.fieldName() ) == 0 ){
onedownseen.insert( e.fieldName() );
- BSONObjBuilder bb ( e.type() == Object ? b.subobjStart( e.fieldName() ) : b.subarrayStart( e.fieldName() ) );
- stringstream nr; nr << root << e.fieldName() << ".";
- createNewFromMods( nr.str() , bb , e.embeddedObject() );
- bb.done();
+ if ( e.type() == Object ) {
+ BSONObjBuilder bb( b.subobjStart( e.fieldName() ) );
+ stringstream nr; nr << root << e.fieldName() << ".";
+ createNewFromMods( nr.str() , bb , e.embeddedObject() );
+ bb.done();
+ } else {
+ BSONArrayBuilder ba( b.subarrayStart( e.fieldName() ) );
+ stringstream nr; nr << root << e.fieldName() << ".";
+ createNewFromMods( nr.str() , ba , e.embeddedObject() );
+ ba.done();
+ }
// inc both as we handled both
e = es.next();
m++;
@@ -401,7 +522,7 @@ namespace mongo {
m++;
continue;
case RIGHT_BEFORE: // field that doesn't have a MOD
- b.append( e );
+ b.append( e ); // if array, ignore field name
e = es.next();
continue;
case RIGHT_SUBFIELD:
@@ -414,7 +535,7 @@ namespace mongo {
// finished looping the mods, just adding the rest of the elements
while ( e.type() ){
- b.append( e );
+ b.append( e ); // if array, ignore field name
e = es.next();
}
@@ -424,9 +545,9 @@ namespace mongo {
}
}
- BSONObj ModSet::createNewFromMods( const BSONObj &obj ) {
- BSONObjBuilder b( (int)(obj.objsize() * 1.1) );
- createNewFromMods( "" , b , obj );
+ BSONObj ModSetState::createNewFromMods() {
+ BSONObjBuilder b( (int)(_obj.objsize() * 1.1) );
+ createNewFromMods( "" , b , _obj );
return b.obj();
}
@@ -451,10 +572,12 @@ namespace mongo {
newObj = bb.obj();
}
- if ( canApplyInPlaceAndVerify( newObj ) )
- applyModsInPlace( newObj );
+ auto_ptr<ModSetState> mss = prepare( newObj );
+
+ if ( mss->canApplyInPlace() )
+ mss->applyModsInPlace();
else
- newObj = createNewFromMods( newObj );
+ newObj = mss->createNewFromMods();
return newObj;
}
@@ -468,17 +591,24 @@ namespace mongo {
{ $pullAll : { a:[99,1010] } }
NOTE: MODIFIES source from object!
*/
- void ModSet::getMods(const BSONObj &from) {
+ ModSet::ModSet(
+ const BSONObj &from ,
+ const set<string>& idxKeys,
+ const set<string> *backgroundKeys)
+ : _isIndexed(0) , _hasDynamicArray( false ) {
+
BSONObjIterator it(from);
+
while ( it.more() ) {
BSONElement e = it.next();
const char *fn = e.fieldName();
+
uassert( 10147 , "Invalid modifier specified" + string( fn ), e.type() == Object );
BSONObj j = e.embeddedObject();
+
BSONObjIterator jt(j);
Mod::Op op = opFromStr( fn );
- if ( op == Mod::INC )
- strcpy((char *) fn, "$set"); // rewrite for op log
+
while ( jt.more() ) {
BSONElement f = jt.next(); // x:44
@@ -490,28 +620,46 @@ namespace mongo {
uassert( 10151 , "have conflict mod" , ! haveConflictingMod( fieldName ) );
uassert( 10152 , "Modifier $inc allowed for numbers only", f.isNumber() || op != Mod::INC );
uassert( 10153 , "Modifier $pushAll/pullAll allowed for arrays only", f.type() == Array || ( op != Mod::PUSH_ALL && op != Mod::PULL_ALL ) );
-
+
+ _hasDynamicArray = _hasDynamicArray || strstr( fieldName , ".$" ) > 0;
+
Mod m;
m.init( op , f );
m.setFieldName( f.fieldName() );
-
- // horrible - to be cleaned up
- if ( f.type() == NumberDouble ) {
- m.ndouble = (double *) f.value();
- m.nint = 0;
- } else if ( f.type() == NumberInt ) {
- m.ndouble = 0;
- m.nint = (int *) f.value();
- }
- else if( f.type() == NumberLong ) {
- m.ndouble = 0;
- m.nint = 0;
- m.nlong = (long long *) f.value();
+
+ if ( m.isIndexed( idxKeys ) ||
+ (backgroundKeys && m.isIndexed(*backgroundKeys)) ) {
+ _isIndexed++;
}
_mods[m.fieldName] = m;
+
+ DEBUGUPDATE( "\t\t " << fieldName << "\t" << _hasDynamicArray );
+ }
+ }
+
+ }
+
+ ModSet * ModSet::fixDynamicArray( const char * elemMatchKey ) const {
+ ModSet * n = new ModSet();
+ n->_isIndexed = _isIndexed;
+ n->_hasDynamicArray = _hasDynamicArray;
+ for ( ModHolder::const_iterator i=_mods.begin(); i!=_mods.end(); i++ ){
+ string s = i->first;
+ size_t idx = s.find( ".$" );
+ if ( idx == string::npos ){
+ n->_mods[s] = i->second;
+ continue;
}
+ StringBuilder buf(s.size()+strlen(elemMatchKey));
+ buf << s.substr(0,idx+1) << elemMatchKey << s.substr(idx+2);
+ string fixed = buf.str();
+ DEBUGUPDATE( "fixed dynamic: " << s << " -->> " << fixed );
+ n->_mods[fixed] = i->second;
+ ModHolder::iterator temp = n->_mods.find( fixed );
+ temp->second.setFieldName( temp->first.c_str() );
}
+ return n;
}
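
Concretely, fixDynamicArray splices the matcher's elemMatchKey into the
positional path; with an illustrative key of "3":

    //   "a.$.b"  -->>  "a.3.b"
    // s.substr(0,idx+1) == "a." , elemMatchKey == "3" , s.substr(idx+2) == ".b"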
void checkNoMods( BSONObj o ) {
@@ -526,46 +674,58 @@ namespace mongo {
class UpdateOp : public QueryOp {
public:
- UpdateOp() : nscanned_() {}
+ UpdateOp() : _nscanned() {}
virtual void init() {
BSONObj pattern = qp().query();
- c_.reset( qp().newCursor().release() );
- if ( !c_->ok() )
+ _c.reset( qp().newCursor().release() );
+ if ( ! _c->ok() )
setComplete();
else
- matcher_.reset( new CoveredIndexMatcher( pattern, qp().indexKey() ) );
+ _matcher.reset( new CoveredIndexMatcher( pattern, qp().indexKey() ) );
}
virtual void next() {
- if ( !c_->ok() ) {
+ if ( ! _c->ok() ) {
setComplete();
return;
}
- nscanned_++;
- if ( matcher_->matches(c_->currKey(), c_->currLoc()) ) {
+ _nscanned++;
+ if ( _matcher->matches(_c->currKey(), _c->currLoc(), &_details ) ) {
setComplete();
return;
}
- c_->advance();
+ _c->advance();
}
bool curMatches(){
- return matcher_->matches(c_->currKey(), c_->currLoc() );
+ return _matcher->matches(_c->currKey(), _c->currLoc() , &_details );
}
virtual bool mayRecordPlan() const { return false; }
virtual QueryOp *clone() const {
return new UpdateOp();
}
- shared_ptr< Cursor > c() { return c_; }
- long long nscanned() const { return nscanned_; }
+ shared_ptr< Cursor > c() { return _c; }
+ long long nscanned() const { return _nscanned; }
+ MatchDetails& getMatchDetails(){ return _details; }
private:
- shared_ptr< Cursor > c_;
- long long nscanned_;
- auto_ptr< CoveredIndexMatcher > matcher_;
+ shared_ptr< Cursor > _c;
+ long long _nscanned;
+ auto_ptr< CoveredIndexMatcher > _matcher;
+ MatchDetails _details;
};
- UpdateResult updateObjects(const char *ns, BSONObj updateobjOrig, BSONObj patternOrig, bool upsert, bool multi, bool logop , OpDebug& debug ) {
+ UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj patternOrig, bool upsert, bool multi, bool logop , OpDebug& debug ) {
+ DEBUGUPDATE( "update: " << ns << " update: " << updateobj << " query: " << patternOrig << " upsert: " << upsert << " multi: " << multi );
int profile = cc().database()->profile;
StringBuilder& ss = debug.str;
+
+ if ( logLevel > 2 )
+ ss << " update: " << updateobj;
+
+ /* the idea with these here is to make them loop invariant for multi updates, and thus be a bit faster for that case */
+ /* NOTE: when yield() is added herein, these must be refreshed after each call to yield! */
+ NamespaceDetails *d = nsdetails(ns); // can be null if an upsert...
+ NamespaceDetailsTransient *nsdt = &NamespaceDetailsTransient::get_w(ns);
+ /* end note */
uassert( 10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0 );
if ( strstr(ns, ".system.") ) {
@@ -573,6 +733,21 @@ namespace mongo {
uassert( 10156 , "cannot update system collection", legalClientSystemNS( ns , true ) );
}
+ auto_ptr<ModSet> mods;
+ bool isOperatorUpdate = updateobj.firstElement().fieldName()[0] == '$';
+ int modsIsIndexed = false; // really the # of indexes
+ if ( isOperatorUpdate ){
+ if( d && d->backgroundIndexBuildInProgress ) {
+ set<string> bgKeys;
+ d->backgroundIdx().keyPattern().getFieldNames(bgKeys);
+ mods.reset( new ModSet(updateobj, nsdt->indexKeys(), &bgKeys) );
+ }
+ else {
+ mods.reset( new ModSet(updateobj, nsdt->indexKeys()) );
+ }
+ modsIsIndexed = mods->isIndexed();
+ }
+
set<DiskLoc> seenObjects;
QueryPlanSet qps( ns, patternOrig, BSONObj() );
@@ -593,11 +768,10 @@ namespace mongo {
c->advance();
continue;
}
-
+
BSONObj js(r);
BSONObj pattern = patternOrig;
- BSONObj updateobj = updateobjOrig;
if ( logop ) {
BSONObjBuilder idPattern;
@@ -620,43 +794,46 @@ namespace mongo {
/* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
regular ones at the moment. */
-
- const char *firstField = updateobj.firstElement().fieldName();
-
- if ( firstField[0] == '$' ) {
-
+ if ( isOperatorUpdate ) {
+
if ( multi ){
c->advance(); // go to next record in case this one moves
if ( seenObjects.count( loc ) )
continue;
- updateobj = updateobj.copy();
}
- ModSet mods;
- mods.getMods(updateobj);
- NamespaceDetailsTransient& ndt = NamespaceDetailsTransient::get_w(ns);
- set<string>& idxKeys = ndt.indexKeys();
- int isIndexed = mods.isIndexed( idxKeys );
-
- if ( isIndexed && multi ){
+ if ( modsIsIndexed && multi ){
c->noteLocation();
}
- if ( isIndexed <= 0 && mods.canApplyInPlaceAndVerify( loc.obj() ) ) {
- mods.applyModsInPlace( loc.obj() );
- //seenObjects.insert( loc );
+ const BSONObj& onDisk = loc.obj();
+
+ ModSet * useMods = mods.get();
+
+ auto_ptr<ModSet> mymodset;
+ if ( u->getMatchDetails().elemMatchKey && mods->hasDynamicArray() ){
+ useMods = mods->fixDynamicArray( u->getMatchDetails().elemMatchKey );
+ mymodset.reset( useMods );
+ }
+
+
+ auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
+
+ if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ){
+ mss->applyModsInPlace();// const_cast<BSONObj&>(onDisk) );
+
if ( profile )
ss << " fastmod ";
- if ( isIndexed ){
+ if ( modsIsIndexed ){
seenObjects.insert( loc );
}
}
else {
- BSONObj newObj = mods.createNewFromMods( loc.obj() );
- uassert( 12522 , "$ operator made objcet too large" , newObj.isValid() );
- DiskLoc newLoc = theDataFileMgr.update(ns, r, loc , newObj.objdata(), newObj.objsize(), debug);
- if ( newLoc != loc || isIndexed ){
+ BSONObj newObj = mss->createNewFromMods();
+ uassert( 12522 , "$ operator made object too large" , newObj.objsize() <= ( 4 * 1024 * 1024 ) );
+ DiskLoc newLoc = theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
+ if ( newLoc != loc || modsIsIndexed ) {
// object moved, need to make sure we don't get it again
seenObjects.insert( newLoc );
}
@@ -664,25 +841,27 @@ namespace mongo {
}
if ( logop ) {
-
- assert( mods.size() );
+ DEV assert( mods->size() );
- if ( mods.haveArrayDepMod() ) {
+ if ( mss->haveArrayDepMod() ) {
BSONObjBuilder patternBuilder;
patternBuilder.appendElements( pattern );
- mods.appendSizeSpecForArrayDepMods( patternBuilder );
+ mss->appendSizeSpecForArrayDepMods( patternBuilder );
pattern = patternBuilder.obj();
}
- if ( mods.needOpLogRewrite() )
- updateobj = mods.getOpLogRewrite();
-
- logOp("u", ns, updateobj, &pattern );
+ if ( mss->needOpLogRewrite() ){
+ DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
+ logOp("u", ns, mss->getOpLogRewrite() , &pattern );
+ }
+ else {
+ logOp("u", ns, updateobj, &pattern );
+ }
}
numModded++;
if ( ! multi )
break;
- if ( multi && isIndexed )
+ if ( multi && modsIsIndexed )
c->checkLocation();
continue;
}
@@ -691,7 +870,7 @@ namespace mongo {
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
- theDataFileMgr.update(ns, r, loc , updateobj.objdata(), updateobj.objsize(), debug);
+ theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug);
if ( logop )
logOp("u", ns, updateobj, &pattern );
return UpdateResult( 1 , 0 , 1 );
@@ -705,13 +884,9 @@ namespace mongo {
ss << " nscanned:" << u->nscanned();
if ( upsert ) {
- if ( updateobjOrig.firstElement().fieldName()[0] == '$' ) {
+ if ( updateobj.firstElement().fieldName()[0] == '$' ) {
/* upsert of an $inc. build a default */
- ModSet mods;
- mods.getMods(updateobjOrig);
-
- BSONObj newObj = mods.createNewFromQuery( patternOrig );
-
+ BSONObj newObj = mods->createNewFromQuery( patternOrig );
if ( profile )
ss << " fastmodinsert ";
theDataFileMgr.insert(ns, newObj);
@@ -722,12 +897,13 @@ namespace mongo {
return UpdateResult( 0 , 1 , 1 );
}
uassert( 10159 , "multi update only works with $ operators" , ! multi );
- checkNoMods( updateobjOrig );
+ checkNoMods( updateobj );
if ( profile )
ss << " upsert ";
- theDataFileMgr.insert(ns, updateobjOrig);
+ BSONObj no = updateobj;
+ theDataFileMgr.insert(ns, no);
if ( logop )
- logOp( "i", ns, updateobjOrig );
+ logOp( "i", ns, no );
return UpdateResult( 0 , 0 , 1 );
}
return UpdateResult( 0 , 0 , 0 );
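
A replication detail worth noting: when an $inc is applied in place, ModState
records the computed value (fixedName becomes "$set" and fixed points at the
updated element), so needOpLogRewrite() fires and the op log carries the
result rather than the delta. With illustrative values:

    // doc on disk : { _id : 1 , x : 5 }
    // update      : { $inc : { x : 2 } }
    // op log gets : { $set : { x : 7 } }   // idempotent on replay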
diff --git a/db/update.h b/db/update.h
index 26a8a8d..e14b0fb 100644
--- a/db/update.h
+++ b/db/update.h
@@ -23,11 +23,17 @@
namespace mongo {
- /* Used for modifiers such as $inc, $set, $push, ... */
+ class ModState;
+ class ModSetState;
+
+ /* Used for modifiers such as $inc, $set, $push, ...
+ * stores the info about a single operation
+ * once created should never be modified
+ */
struct Mod {
// See opFromStr below
- // 0 1 2 3 4 5 6 7 8 9 10
- enum Op { INC, SET, PUSH, PUSH_ALL, PULL, PULL_ALL , POP, UNSET, BITAND, BITOR , BIT } op;
+ // 0 1 2 3 4 5 6 7 8 9 10 11
+ enum Op { INC, SET, PUSH, PUSH_ALL, PULL, PULL_ALL , POP, UNSET, BITAND, BITOR , BIT , ADDTOSET } op;
static const char* modNames[];
static unsigned modNamesNum;
@@ -35,13 +41,7 @@ namespace mongo {
const char *fieldName;
const char *shortFieldName;
- // kind of lame; fix one day?
- double *ndouble;
- int *nint;
- long long *nlong;
-
BSONElement elt; // x:5 note: this is the actual element from the updateobj
- int pushStartSize;
boost::shared_ptr<Matcher> matcher;
void init( Op o , BSONElement& e ){
@@ -59,36 +59,32 @@ namespace mongo {
else
shortFieldName = fieldName;
}
-
- /* [dm] why is this const? (or rather, why was setn const?) i see why but think maybe clearer if were not. */
- void inc(BSONElement& n) const {
- uassert( 10160 , "$inc value is not a number", n.isNumber() );
- if( ndouble )
- *ndouble += n.numberDouble();
- else if( nint )
- *nint += n.numberInt();
- else
- *nlong += n.numberLong();
- }
-
- void setElementToOurNumericValue(BSONElement& e) const {
- BSONElementManipulator manip(e);
- if( e.type() == NumberLong )
- manip.setLong(_getlong());
- else
- manip.setNumber(_getn());
- }
-
- double _getn() const {
- if( ndouble ) return *ndouble;
- if( nint ) return *nint;
- return (double) *nlong;
- }
- long long _getlong() const {
- if( nlong ) return *nlong;
- if( ndouble ) return (long long) *ndouble;
- return *nint;
+
+ /**
+ * @param in increments the actual value inside in (modified in place)
+ */
+ void incrementMe( BSONElement& in ) const {
+ BSONElementManipulator manip( in );
+
+ switch ( in.type() ){
+ case NumberDouble:
+ manip.setNumber( elt.numberDouble() + in.numberDouble() );
+ break;
+ case NumberLong:
+ manip.setLong( elt.numberLong() + in.numberLong() );
+ break;
+ case NumberInt:
+ manip.setInt( elt.numberInt() + in.numberInt() );
+ break;
+ default:
+ assert(0);
+ }
+
}
+
+ template< class Builder >
+ void appendIncremented( Builder& bb , const BSONElement& in, ModState& ms ) const;
+
bool operator<( const Mod &other ) const {
return strcmp( fieldName, other.fieldName ) < 0;
}
@@ -120,34 +116,15 @@ namespace mongo {
return false;
}
- void apply( BSONObjBuilder& b , BSONElement in );
+ template< class Builder >
+ void apply( Builder& b , BSONElement in , ModState& ms ) const;
/**
* @return true iff toMatch should be removed from the array
*/
bool _pullElementMatch( BSONElement& toMatch ) const;
- bool needOpLogRewrite() const {
- switch( op ){
- case BIT:
- case BITAND:
- case BITOR:
- // TODO: should we convert this to $set?
- return false;
- default:
- return false;
- }
- }
-
- void appendForOpLog( BSONObjBuilder& b ) const {
- const char * name = modNames[op];
-
- BSONObjBuilder bb( b.subobjStart( name ) );
- bb.append( elt );
- bb.done();
- }
-
- void _checkForAppending( BSONElement& e ){
+ void _checkForAppending( const BSONElement& e ) const {
if ( e.type() == Object ){
// this is a tiny bit slow, but rare and important
// only when setting something TO an object, not setting something in an object
@@ -157,12 +134,38 @@ namespace mongo {
}
}
+ bool isEach() const {
+ if ( elt.type() != Object )
+ return false;
+ BSONElement e = elt.embeddedObject().firstElement();
+ if ( e.type() != Array )
+ return false;
+ return strcmp( e.fieldName() , "$each" ) == 0;
+ }
+
+ BSONObj getEach() const {
+ return elt.embeddedObjectUserCheck().firstElement().embeddedObjectUserCheck();
+ }
+
+ void parseEach( BSONElementSet& s ) const {
+ BSONObjIterator i(getEach());
+ while ( i.more() ){
+ s.insert( i.next() );
+ }
+ }
+
};
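
isEach() is purely a shape test: the modifier's value must be an Object whose
first element is an Array named "$each". For
{ $addToSet : { a : { $each : [ 1 , 2 , 2 ] } } }, with m an illustrative Mod:

    // m.elt        ==  a : { $each : [ 1 , 2 , 2 ] }
    // m.isEach()   ->  true
    // m.getEach()  ->  the [ 1 , 2 , 2 ] array as a BSONObj
    // BSONElementSet s; m.parseEach( s );  // s holds { 1 , 2 } - duplicates collapse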
- class ModSet {
+ /**
+ * stores a set of Mods
+ * once created, should never be changed
+ */
+ class ModSet : boost::noncopyable {
typedef map<string,Mod> ModHolder;
ModHolder _mods;
-
+ int _isIndexed;
+ bool _hasDynamicArray;
+
static void extractFields( map< string, BSONElement > &fields, const BSONElement &top, const string &base );
FieldCompareResult compare( const ModHolder::iterator &m, map< string, BSONElement >::iterator &p, const map< string, BSONElement >::iterator &pEnd ) const {
@@ -180,45 +183,6 @@ namespace mongo {
return compareDottedFieldNames( m->first, p->first.c_str() );
}
-
- void _appendNewFromMods( const string& root , Mod& m , BSONObjBuilder& b , set<string>& onedownseen );
-
- void appendNewFromMod( Mod& m , BSONObjBuilder& b ){
- switch ( m.op ){
-
- case Mod::PUSH: {
- BSONObjBuilder arr( b.subarrayStart( m.shortFieldName ) );
- arr.appendAs( m.elt, "0" );
- arr.done();
- m.pushStartSize = -1;
- break;
- }
-
- case Mod::PUSH_ALL: {
- b.appendAs( m.elt, m.shortFieldName );
- m.pushStartSize = -1;
- break;
- }
-
- case Mod::UNSET:
- case Mod::PULL:
- case Mod::PULL_ALL:
- // no-op b/c unset/pull of nothing does nothing
- break;
-
- case Mod::INC:
- case Mod::SET: {
- m._checkForAppending( m.elt );
- b.appendAs( m.elt, m.shortFieldName );
- break;
- }
- default:
- stringstream ss;
- ss << "unknown mod in appendNewFromMod: " << m.op;
- throw UserException( 9015, ss.str() );
- }
-
- }
bool mayAddEmbedded( map< string, BSONElement > &existing, string right ) {
for( string left = EmbeddedBuilder::splitDot( right );
@@ -279,39 +243,51 @@ namespace mongo {
}
break;
}
+ case 'a': {
+ if ( fn[2] == 'd' && fn[3] == 'd' ){
+ // add
+ if ( fn[4] == 'T' && fn[5] == 'o' && fn[6] == 'S' && fn[7] == 'e' && fn[8] == 't' && fn[9] == 0 )
+ return Mod::ADDTOSET;
+
+ }
+ }
default: break;
}
uassert( 10161 , "Invalid modifier specified " + string( fn ), false );
return Mod::INC;
}
- public:
+ ModSet(){}
- void getMods( const BSONObj &from );
- /**
- will return if can be done in place, or uassert if there is an error
- @return whether or not the mods can be done in place
- */
- bool canApplyInPlaceAndVerify( const BSONObj &obj ) const;
- void applyModsInPlace( const BSONObj &obj ) const;
+ public:
+
+ ModSet( const BSONObj &from ,
+ const set<string>& idxKeys = set<string>(),
+ const set<string>* backgroundKeys = 0
+ );
- // new recursive version, will replace at some point
- void createNewFromMods( const string& root , BSONObjBuilder& b , const BSONObj &obj );
+ // TODO: this is inefficient - should probably just handle this while iterating
+ ModSet * fixDynamicArray( const char * elemMatchKey ) const;
- BSONObj createNewFromMods( const BSONObj &obj );
+ bool hasDynamicArray() const { return _hasDynamicArray; }
+ /**
+ * creates a ModSetState suitable for operation on obj
+ * doesn't modify this ModSet or any underlying Mod
+ */
+ auto_ptr<ModSetState> prepare( const BSONObj& obj ) const;
+
+ /**
+ * given a query pattern, builds an object suitable for an upsert
+ * will take the query spec and combine all $ operators
+ */
BSONObj createNewFromQuery( const BSONObj& query );
/**
*
*/
- int isIndexed( const set<string>& idxKeys ) const {
- int numIndexes = 0;
- for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ ){
- if ( i->second.isIndexed( idxKeys ) )
- numIndexes++;
- }
- return numIndexes;
+ int isIndexed() const {
+ return _isIndexed;
}
unsigned size() const { return _mods.size(); }
@@ -341,10 +317,190 @@ namespace mongo {
}
+ };
+
+ /**
+ * stores the state of a single Mod operating on a single Object
+ */
+ class ModState {
+ public:
+ const Mod * m;
+ BSONElement old;
+
+ const char * fixedName;
+ BSONElement * fixed;
+ int pushStartSize;
+
+ BSONType incType;
+ int incint;
+ double incdouble;
+ long long inclong;
+
+ ModState(){
+ fixedName = 0;
+ fixed = 0;
+ pushStartSize = -1;
+ incType = EOO;
+ }
+
+ Mod::Op op() const {
+ return m->op;
+ }
+
+ const char * fieldName() const {
+ return m->fieldName;
+ }
+
+ bool needOpLogRewrite() const {
+ if ( fixed || fixedName || incType )
+ return true;
+
+ switch( op() ){
+ case Mod::BIT:
+ case Mod::BITAND:
+ case Mod::BITOR:
+ // TODO: should we convert this to $set?
+ return false;
+ default:
+ return false;
+ }
+ }
+
+ void appendForOpLog( BSONObjBuilder& b ) const {
+ if ( incType ){
+ BSONObjBuilder bb( b.subobjStart( "$set" ) );
+ appendIncValue( bb );
+ bb.done();
+ return;
+ }
+
+ const char * name = fixedName ? fixedName : Mod::modNames[op()];
+
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ if ( fixed )
+ bb.appendAs( *fixed , m->fieldName );
+ else
+ bb.append( m->elt );
+ bb.done();
+ }
+
+ template< class Builder >
+ void apply( Builder& b , BSONElement in ){
+ m->apply( b , in , *this );
+ }
+
+ template< class Builder >
+ void appendIncValue( Builder& b ) const {
+ switch ( incType ){
+ case NumberDouble:
+ b.append( m->shortFieldName , incdouble ); break;
+ case NumberLong:
+ b.append( m->shortFieldName , inclong ); break;
+ case NumberInt:
+ b.append( m->shortFieldName , incint ); break;
+ default:
+ assert(0);
+ }
+ }
+ };
+
+ /**
+ * this is used to hold state and metadata while applying a ModSet to a BSONObj
+ * the goal is to make ModSet const so it's re-usable
+ */
+ class ModSetState : boost::noncopyable {
+ struct FieldCmp {
+ bool operator()( const string &l, const string &r ) const {
+ return lexNumCmp( l.c_str(), r.c_str() ) < 0;
+ }
+ };
+ typedef map<string,ModState,FieldCmp> ModStateHolder;
+ const BSONObj& _obj;
+ ModStateHolder _mods;
+ bool _inPlacePossible;
+
+ ModSetState( const BSONObj& obj )
+ : _obj( obj ) , _inPlacePossible(true){
+ }
+
+ /**
+ * @return if in place is still possible
+ */
+ bool amIInPlacePossible( bool inPlacePossible ){
+ if ( ! inPlacePossible )
+ _inPlacePossible = false;
+ return _inPlacePossible;
+ }
+
+ template< class Builder >
+ void createNewFromMods( const string& root , Builder& b , const BSONObj &obj );
+
+ template< class Builder >
+ void _appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen );
+
+ template< class Builder >
+ void appendNewFromMod( ModState& ms , Builder& b ){
+ //const Mod& m = *(ms.m); // HACK
+ Mod& m = *((Mod*)(ms.m)); // HACK
+
+ switch ( m.op ){
+
+ case Mod::PUSH:
+ case Mod::ADDTOSET: {
+ if ( m.isEach() ){
+ b.appendArray( m.shortFieldName , m.getEach() );
+ }
+ else {
+ BSONObjBuilder arr( b.subarrayStart( m.shortFieldName ) );
+ arr.appendAs( m.elt, "0" );
+ arr.done();
+ }
+ break;
+ }
+
+ case Mod::PUSH_ALL: {
+ b.appendAs( m.elt, m.shortFieldName );
+ break;
+ }
+
+ case Mod::UNSET:
+ case Mod::PULL:
+ case Mod::PULL_ALL:
+ // no-op b/c unset/pull of nothing does nothing
+ break;
+
+ case Mod::INC:
+ ms.fixedName = "$set";
+ case Mod::SET: {
+ m._checkForAppending( m.elt );
+ b.appendAs( m.elt, m.shortFieldName );
+ break;
+ }
+ default:
+ stringstream ss;
+ ss << "unknown mod in appendNewFromMod: " << m.op;
+ throw UserException( 9015, ss.str() );
+ }
+
+ }
+
+ public:
+
+ bool canApplyInPlace() const {
+ return _inPlacePossible;
+ }
+
+ /**
+ * modifies the underlying _obj
+ */
+ void applyModsInPlace();
+
+ BSONObj createNewFromMods();
+
// re-writing for oplog
bool needOpLogRewrite() const {
- for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
if ( i->second.needOpLogRewrite() )
return true;
return false;
@@ -352,31 +508,33 @@ namespace mongo {
BSONObj getOpLogRewrite() const {
BSONObjBuilder b;
- for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
i->second.appendForOpLog( b );
return b.obj();
}
bool haveArrayDepMod() const {
- for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
- if ( i->second.arrayDep() )
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
+ if ( i->second.m->arrayDep() )
return true;
return false;
}
void appendSizeSpecForArrayDepMods( BSONObjBuilder &b ) const {
- for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ ) {
- const Mod& m = i->second;
- if ( m.arrayDep() ){
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ ) {
+ const ModState& m = i->second;
+ if ( m.m->arrayDep() ){
if ( m.pushStartSize == -1 )
- b.appendNull( m.fieldName );
+ b.appendNull( m.fieldName() );
else
- b << m.fieldName << BSON( "$size" << m.pushStartSize );
+ b << m.fieldName() << BSON( "$size" << m.pushStartSize );
}
}
}
+
+
+ friend class ModSet;
};
-
}
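
A note on FieldCmp: ordering ModStateHolder with lexNumCmp makes numeric path
components compare as numbers ("a.2" sorts before "a.10"), consistent with
compareDottedFieldNames as used in createNewFromMods. It is also why
createNewFromMods bounds its subtree with lower_bound( root + '{' ): under
lexNumCmp, '{' sorts after every digit and letter (see the LexNumCmp cases
below), so the half-open range [ root, root + '{' ) spans exactly the mods
under root. Illustrative keys:

    // "a.2" < "a.10"   -- numeric runs compare numerically
    // "a.x" < "a.{"    -- '{' sorts after digits and letters
    // so lower_bound("a.") .. lower_bound("a.{") covers every mod rooted at "a."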
diff --git a/dbtests/basictests.cpp b/dbtests/basictests.cpp
index 20dc6d7..eaadf75 100644
--- a/dbtests/basictests.cpp
+++ b/dbtests/basictests.cpp
@@ -21,6 +21,7 @@
#include "dbtests.h"
#include "../util/base64.h"
+#include "../util/array.h"
namespace BasicTests {
@@ -227,6 +228,91 @@ namespace BasicTests {
ASSERT_EQUALS( 1 , x );
}
};
+
+ namespace ArrayTests {
+ class basic1 {
+ public:
+ void run(){
+ FastArray<int> a(100);
+ a.push_back( 5 );
+ a.push_back( 6 );
+
+ ASSERT_EQUALS( 2 , a.size() );
+
+ FastArray<int>::iterator i = a.begin();
+ ASSERT( i != a.end() );
+ ASSERT_EQUALS( 5 , *i );
+ ++i;
+ ASSERT( i != a.end() );
+ ASSERT_EQUALS( 6 , *i );
+ ++i;
+ ASSERT( i == a.end() );
+ }
+ };
+ };
+
+ class ThreadSafeStringTest {
+ public:
+ void run(){
+ ThreadSafeString s;
+ s = "eliot";
+ ASSERT_EQUALS( s , "eliot" );
+ ASSERT( s != "eliot2" );
+
+ ThreadSafeString s2 = s;
+ ASSERT_EQUALS( s2 , "eliot" );
+
+
+ {
+ string foo;
+ {
+ ThreadSafeString bar;
+ bar = "eliot2";
+ foo = bar;
+ }
+ ASSERT_EQUALS( "eliot2" , foo );
+ }
+ }
+ };
+
+ class LexNumCmp {
+ public:
+ void run() {
+ ASSERT_EQUALS( 0, lexNumCmp( "a", "a" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "aa" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa", "a" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "b" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "100", "50" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "50", "100" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "b", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "aa", "aa" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa", "ab" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "ab", "aa" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "0", "a" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a0", "aa" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa", "a0" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "10", "10" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "1", "10" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "10", "1" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "11", "10" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "10", "11" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f11f", "f10f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f10f", "f11f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f11f", "f111" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f111", "f11f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f12f", "f12g" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f12g", "f12f" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aab" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aa1" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a1{", "a11" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a1{a", "a1{" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a1{a" ) );
+ ASSERT_EQUALS( 1, lexNumCmp("21", "11") );
+ ASSERT_EQUALS( -1, lexNumCmp("11", "21") );
+ }
+ };
class All : public Suite {
public:
@@ -244,6 +330,9 @@ namespace BasicTests {
add< sleeptest >();
add< AssertTests >();
+
+ add< ArrayTests::basic1 >();
+ add< LexNumCmp >();
}
} myall;
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index 5a0b15d..3c9dc8d 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -28,14 +28,14 @@ namespace BtreeTests {
class Base {
public:
- Base() {
+ Base() :
+ _context( ns() ) {
+
{
bool f = false;
assert( f = true );
massert( 10402 , "assert is misdefined", f);
}
-
- setClient( ns() );
BSONObjBuilder builder;
builder.append( "ns", ns() );
builder.append( "name", "testIndex" );
@@ -100,6 +100,7 @@ namespace BtreeTests {
}
private:
dblock lk_;
+ Client::Context _context;
IndexDetails idx_;
};
diff --git a/dbtests/clienttests.cpp b/dbtests/clienttests.cpp
index 1dadd1a..6735a40 100644
--- a/dbtests/clienttests.cpp
+++ b/dbtests/clienttests.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// client.cpp
#include "stdafx.h"
@@ -82,7 +98,62 @@ namespace ClientTests {
};
+ class CS_10 : public Base {
+ public:
+ CS_10() : Base( "CS_10" ) {}
+ void run() {
+ string longs( 770, 'c' );
+ for( int i = 0; i < 1111; ++i )
+ db.insert( ns(), BSON( "a" << i << "b" << longs ) );
+ db.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ) );
+
+ auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "a" << 1 << "b" << 1 ) ) );
+ ASSERT_EQUALS( 1111, c->itcount() );
+ }
+ };
+ class PushBack : public Base {
+ public:
+ PushBack() : Base( "PushBack" ) {}
+ void run() {
+ for( int i = 0; i < 10; ++i )
+ db.insert( ns(), BSON( "i" << i ) );
+ auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "i" << 1 ) ) );
+ BSONObj o = c->next();
+ ASSERT( c->more() );
+ ASSERT( c->moreInCurrentBatch() );
+ c->putBack( o );
+ ASSERT( c->more() );
+ ASSERT( c->moreInCurrentBatch() );
+ o = c->next();
+ BSONObj o2 = c->next();
+ BSONObj o3 = c->next();
+ c->putBack( o3 );
+ c->putBack( o2 );
+ c->putBack( o );
+ for( int i = 0; i < 10; ++i ) {
+ o = c->next();
+ ASSERT_EQUALS( i, o[ "i" ].number() );
+ }
+ ASSERT( !c->more() );
+ ASSERT( !c->moreInCurrentBatch() );
+ c->putBack( o );
+ ASSERT( c->more() );
+ ASSERT( c->moreInCurrentBatch() );
+ ASSERT_EQUALS( 1, c->itcount() );
+ }
+ };
+
+ class Create : public Base {
+ public:
+ Create() : Base( "Create" ) {}
+ void run() {
+ db.createCollection( "unittests.clienttests.create", 0, true );
+ BSONObj info;
+ ASSERT( db.runCommand( "unittests", BSON( "collstats" << "clienttests.create" ), info ) );
+ }
+ };
+
class All : public Suite {
public:
All() : Suite( "client" ){
@@ -92,6 +163,9 @@ namespace ClientTests {
add<DropIndex>();
add<ReIndex>();
add<ReIndex2>();
+ add<CS_10>();
+ add<PushBack>();
+ add<Create>();
}
} all;
diff --git a/dbtests/cursortests.cpp b/dbtests/cursortests.cpp
index 28a4ba4..f14c5fa 100644
--- a/dbtests/cursortests.cpp
+++ b/dbtests/cursortests.cpp
@@ -42,7 +42,7 @@ namespace CursorTests {
BoundList b;
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 1 ), BSON( "" << 2 ) ) );
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 4 ), BSON( "" << 6 ) ) );
- setClient( ns );
+ Client::Context ctx( ns );
BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), b, 1 );
ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
double expected[] = { 1, 2, 4, 5, 6 };
@@ -72,7 +72,7 @@ namespace CursorTests {
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << -50 ), BSON( "" << 2 ) ) );
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 40 ), BSON( "" << 60 ) ) );
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 109 ), BSON( "" << 200 ) ) );
- setClient( ns );
+ Client::Context ctx( ns );
BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), b, 1 );
ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
double expected[] = { 0, 1, 2, 109 };
@@ -99,7 +99,7 @@ namespace CursorTests {
BoundList b;
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 6 ), BSON( "" << 4 ) ) );
b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 2 ), BSON( "" << 1 ) ) );
- setClient( ns );
+ Client::Context ctx( ns );
BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), b, -1 );
ASSERT_EQUALS( "BtreeCursor a_1 reverse multi", c.toString() );
double expected[] = { 6, 5, 4, 2, 1 };
@@ -112,7 +112,7 @@ namespace CursorTests {
}
};
- } // namespace MultiBtreeCursorTests
+ } // namespace BtreeCursorTests
class All : public Suite {
public:
diff --git a/dbtests/dbtests.cpp b/dbtests/dbtests.cpp
index 3821163..4b81ea9 100644
--- a/dbtests/dbtests.cpp
+++ b/dbtests/dbtests.cpp
@@ -22,6 +22,7 @@
#include "dbtests.h"
int main( int argc, char** argv ) {
+ static StaticObserver StaticObserver;
return Suite::run(argc, argv, "/tmp/unittest");
}
diff --git a/dbtests/framework.cpp b/dbtests/framework.cpp
index 6ed5e72..4553686 100644
--- a/dbtests/framework.cpp
+++ b/dbtests/framework.cpp
@@ -358,4 +358,6 @@ namespace mongo {
}
}
+
+ void setupSignals(){}
}
diff --git a/dbtests/jsobjtests.cpp b/dbtests/jsobjtests.cpp
index 0402426..e470e60 100644
--- a/dbtests/jsobjtests.cpp
+++ b/dbtests/jsobjtests.cpp
@@ -251,6 +251,133 @@ namespace JsobjTests {
}
};
+ class AsTempObj{
+ public:
+ void run(){
+ {
+ BSONObjBuilder bb;
+ bb << "a" << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4+(1+2+4)+1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << 1));
+
+ bb << "b" << 2;
+ BSONObj obj = bb.obj();
+ ASSERT(obj.objsize() == 4+(1+2+4)+(1+2+4)+1);
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj == BSON("a" << 1 << "b" << 2));
+ }
+ {
+ BSONObjBuilder bb;
+ bb << "a" << GT << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4+(1+2+(4+1+4+4+1))+1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));
+
+ bb << "b" << LT << 2;
+ BSONObj obj = bb.obj();
+ ASSERT(obj.objsize() == 4+(1+2+(4+1+4+4+1))+(1+2+(4+1+4+4+1))+1);
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj == BSON("a" << BSON("$gt" << 1)
+ << "b" << BSON("$lt" << 2)));
+ }
+ {
+ BSONObjBuilder bb(32);
+ bb << "a" << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4+(1+2+4)+1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << 1));
+
+ //force a realloc
+ BSONArrayBuilder arr;
+ for (int i=0; i < 10000; i++){
+ arr << i;
+ }
+ bb << "b" << arr.arr();
+ BSONObj obj = bb.obj();
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj.objdata() != tmp.objdata());
+ }
+ }
+ };
+
+ struct AppendIntOrLL{
+ void run(){
+ const long long billion = 1000*1000*1000;
+ BSONObjBuilder b;
+ b.appendIntOrLL("i1", 1);
+ b.appendIntOrLL("i2", -1);
+ b.appendIntOrLL("i3", 1*billion);
+ b.appendIntOrLL("i4", -1*billion);
+
+ b.appendIntOrLL("L1", 2*billion);
+ b.appendIntOrLL("L2", -2*billion);
+ b.appendIntOrLL("L3", 4*billion);
+ b.appendIntOrLL("L4", -4*billion);
+ b.appendIntOrLL("L5", 16*billion);
+ b.appendIntOrLL("L6", -16*billion);
+
+ BSONObj o = b.obj();
+
+ ASSERT(o["i1"].type() == NumberInt);
+ ASSERT(o["i1"].number() == 1);
+ ASSERT(o["i2"].type() == NumberInt);
+ ASSERT(o["i2"].number() == -1);
+ ASSERT(o["i3"].type() == NumberInt);
+ ASSERT(o["i3"].number() == 1*billion);
+ ASSERT(o["i4"].type() == NumberInt);
+ ASSERT(o["i4"].number() == -1*billion);
+
+ ASSERT(o["L1"].type() == NumberLong);
+ ASSERT(o["L1"].number() == 2*billion);
+ ASSERT(o["L2"].type() == NumberLong);
+ ASSERT(o["L2"].number() == -2*billion);
+ ASSERT(o["L3"].type() == NumberLong);
+ ASSERT(o["L3"].number() == 4*billion);
+ ASSERT(o["L4"].type() == NumberLong);
+ ASSERT(o["L4"].number() == -4*billion);
+ ASSERT(o["L5"].type() == NumberLong);
+ ASSERT(o["L5"].number() == 16*billion);
+ ASSERT(o["L6"].type() == NumberLong);
+ ASSERT(o["L6"].number() == -16*billion);
+ }
+ };
+
+ struct AppendNumber {
+ void run(){
+ BSONObjBuilder b;
+ b.appendNumber( "a" , 5 );
+ b.appendNumber( "b" , 5.5 );
+ b.appendNumber( "c" , (1024LL*1024*1024)-1 );
+ b.appendNumber( "d" , (1024LL*1024*1024*1024)-1 );
+ b.appendNumber( "e" , 1024LL*1024*1024*1024*1024*1024 );
+
+ BSONObj o = b.obj();
+
+ ASSERT( o["a"].type() == NumberInt );
+ ASSERT( o["b"].type() == NumberDouble );
+ ASSERT( o["c"].type() == NumberInt );
+ ASSERT( o["d"].type() == NumberDouble );
+ ASSERT( o["e"].type() == NumberLong );
+
+ }
+ };
+
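
Taken together these two tests pin down the numeric type selection:
appendIntOrLL stores NumberInt when the value fits in 32 bits and NumberLong
otherwise, while appendNumber also has a middle band where a large but
double-exact long long is stored as NumberDouble. What the assertions
establish:

    // appendIntOrLL :  +/- 1e9 -> NumberInt ;  +/- 2e9 and beyond -> NumberLong
    // appendNumber  :  5          -> NumberInt
    //                  2^30 - 1   -> NumberInt
    //                  2^40 - 1   -> NumberDouble (still exact as a double)
    //                  2^60       -> NumberLong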
namespace Validation {
class Base {
@@ -339,8 +466,9 @@ namespace JsobjTests {
}
BSONObj invalid() const {
BSONObj ret = valid();
- set( ret, 0, get( ret, 0 ) + 1 );
- set( ret, 7, get( ret, 7 ) + 1 );
+ ASSERT_EQUALS( ret.firstElement().valuestr()[0] , 'b' );
+ ASSERT_EQUALS( ret.firstElement().valuestr()[1] , 0 );
+ ((char*)ret.firstElement().valuestr())[1] = 1;
return ret.copy();
}
};
@@ -391,32 +519,6 @@ namespace JsobjTests {
};
};
- class WrongSymbolSize : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":\"b\"}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 4, Symbol );
- set( ret, 0, get( ret, 0 ) + 1 );
- set( ret, 7, get( ret, 7 ) + 1 );
- return ret.copy();
- }
- };
-
- class WrongCodeSize : public Base {
- BSONObj valid() const {
- return fromjson( "{\"a\":\"b\"}" );
- }
- BSONObj invalid() const {
- BSONObj ret = valid();
- set( ret, 4, Code );
- set( ret, 0, get( ret, 0 ) + 1 );
- set( ret, 7, get( ret, 7 ) + 1 );
- return ret.copy();
- }
- };
-
class NoFieldNameEnd : public Base {
BSONObj valid() const {
return fromjson( "{\"a\":1}" );
@@ -759,7 +861,9 @@ namespace JsobjTests {
public:
void run() {
Date_t before = jsTime();
+ sleepmillis(1);
time_t now = time(NULL);
+ sleepmillis(1);
Date_t after = jsTime();
BSONObjBuilder b;
@@ -1245,6 +1349,77 @@ namespace JsobjTests {
}
};
+ class InvalidIDFind {
+ public:
+ void run(){
+ BSONObj x = BSON( "_id" << 5 << "t" << 2 );
+ {
+ char * crap = (char*)malloc( x.objsize() );
+ memcpy( crap , x.objdata() , x.objsize() );
+ BSONObj y( crap , false );
+ ASSERT_EQUALS( x , y );
+ free( crap );
+ }
+
+ {
+ char * crap = (char*)malloc( x.objsize() );
+ memcpy( crap , x.objdata() , x.objsize() );
+ int * foo = (int*)crap;
+ foo[0] = 123123123;
+ int state = 0;
+ try {
+ BSONObj y( crap , false );
+ state = 1;
+ }
+ catch ( std::exception& e ){
+ state = 2;
+ ASSERT( strstr( e.what() , "_id: 5" ) > 0 );
+ }
+ free( crap );
+ ASSERT_EQUALS( 2 , state );
+ }
+
+
+ }
+ };
+
+ class ElementSetTest {
+ public:
+ void run(){
+ BSONObj x = BSON( "a" << 1 << "b" << 1 << "c" << 2 );
+ BSONElement a = x["a"];
+ BSONElement b = x["b"];
+ BSONElement c = x["c"];
+ cout << "c: " << c << endl;
+ ASSERT( a.woCompare( b ) != 0 );
+ ASSERT( a.woCompare( b , false ) == 0 );
+
+ BSONElementSet s;
+ s.insert( a );
+ ASSERT_EQUALS( 1U , s.size() );
+ s.insert( b );
+ ASSERT_EQUALS( 1U , s.size() );
+ ASSERT( ! s.count( c ) );
+
+ ASSERT( s.find( a ) != s.end() );
+ ASSERT( s.find( b ) != s.end() );
+ ASSERT( s.find( c ) == s.end() );
+
+
+ s.insert( c );
+ ASSERT_EQUALS( 2U , s.size() );
+
+
+ ASSERT( s.find( a ) != s.end() );
+ ASSERT( s.find( b ) != s.end() );
+ ASSERT( s.find( c ) != s.end() );
+
+ ASSERT( s.count( a ) );
+ ASSERT( s.count( b ) );
+ ASSERT( s.count( c ) );
+ }
+ };
+
class All : public Suite {
public:
All() : Suite( "jsobj" ){
@@ -1264,6 +1439,9 @@ namespace JsobjTests {
add< BSONObjTests::MultiKeySortOrder > ();
add< BSONObjTests::TimestampTest >();
add< BSONObjTests::Nan >();
+ add< BSONObjTests::AsTempObj >();
+ add< BSONObjTests::AppendIntOrLL >();
+ add< BSONObjTests::AppendNumber >();
add< BSONObjTests::Validation::BadType >();
add< BSONObjTests::Validation::EooBeforeEnd >();
add< BSONObjTests::Validation::Undefined >();
@@ -1274,8 +1452,6 @@ namespace JsobjTests {
add< BSONObjTests::Validation::NegativeStringSize >();
add< BSONObjTests::Validation::WrongSubobjectSize >();
add< BSONObjTests::Validation::WrongDbrefNsSize >();
- add< BSONObjTests::Validation::WrongSymbolSize >();
- add< BSONObjTests::Validation::WrongCodeSize >();
add< BSONObjTests::Validation::NoFieldNameEnd >();
add< BSONObjTests::Validation::BadRegex >();
add< BSONObjTests::Validation::BadRegexOptions >();
@@ -1332,7 +1508,8 @@ namespace JsobjTests {
add< NumberParsing >();
add< bson2settest >();
add< checkForStorageTests >();
-
+ add< InvalidIDFind >();
+ add< ElementSetTest >();
}
} myall;
diff --git a/dbtests/jsontests.cpp b/dbtests/jsontests.cpp
index 68c6b5c..aa6b1a2 100644
--- a/dbtests/jsontests.cpp
+++ b/dbtests/jsontests.cpp
@@ -47,7 +47,7 @@ namespace JsonTests {
void run() {
BSONObjBuilder b;
b.append( "a", "\" \\ / \b \f \n \r \t" );
- ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ / \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
}
};
@@ -304,7 +304,7 @@ namespace JsonTests {
BSONObjBuilder b;
b.appendRegex( "a", "/\"", "i" );
BSONObj built = b.done();
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"\\/\\\"\", \"$options\" : \"i\" } }",
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"/\\\"\", \"$options\" : \"i\" } }",
built.jsonString( Strict ) );
ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( TenGen ) );
ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( JS ) );
diff --git a/dbtests/jstests.cpp b/dbtests/jstests.cpp
index 86b0218..454dcdc 100644
--- a/dbtests/jstests.cpp
+++ b/dbtests/jstests.cpp
@@ -515,6 +515,62 @@ namespace JSTests {
};
+ class NumberLong {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+ s->localConnect( "blah" );
+ BSONObjBuilder b;
+ long long val = (long long)( 0xbabadeadbeefbaddULL );
+ b.append( "a", val );
+ BSONObj in = b.obj();
+ s->setObject( "a", in );
+ BSONObj out = s->getObject( "a" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+
+ ASSERT( s->exec( "printjson( a ); b = {b:a.a}", "foo", false, true, false ) );
+ out = s->getObject( "b" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+ ASSERT_EQUALS( val, out.firstElement().numberLong() );
+
+ ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
+ out = s->getObject( "c" );
+ stringstream ss;
+ ss << val;
+ ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
+
+ ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
+ out = s->getObject( "d" );
+ ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+ ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
+ out = s->getObject( "e" );
+ ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+ ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
+ out = s->getObject( "f" );
+ ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
+
+ s->setObject( "z", BSON( "z" << (long long)( 4 ) ) );
+ ASSERT( s->exec( "y = {y:z.z.top}", "foo", false, true, false ) );
+ out = s->getObject( "y" );
+ ASSERT_EQUALS( Undefined, out.firstElement().type() );
+
+ ASSERT( s->exec( "x = {x:z.z.floatApprox}", "foo", false, true, false ) );
+ out = s->getObject( "x" );
+ ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
+ ASSERT_EQUALS( double( 4 ), out.firstElement().number() );
+
+ ASSERT( s->exec( "w = {w:z.z}", "foo", false, true, false ) );
+ out = s->getObject( "w" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+ ASSERT_EQUALS( 4, out.firstElement().numberLong() );
+
+ }
+ };
+
class WeirdObjects {
public:
@@ -673,6 +729,25 @@ namespace JSTests {
const char * _b;
};
+ class InformalDBRef {
+ public:
+ void run() {
+ client.insert( ns(), BSON( "i" << 1 ) );
+ BSONObj obj = client.findOne( ns(), BSONObj() );
+ client.remove( ns(), BSONObj() );
+ client.insert( ns(), BSON( "r" << BSON( "$ref" << "jstests.informaldbref" << "$id" << obj["_id"].__oid() << "foo" << "bar" ) ) );
+ obj = client.findOne( ns(), BSONObj() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+
+ ASSERT( client.eval( "unittest", "x = db.jstests.informaldbref.findOne(); y = { r:x.r }; db.jstests.informaldbref.drop(); y.r[ \"a\" ] = \"b\"; db.jstests.informaldbref.save( y );" ) );
+ obj = client.findOne( ns(), BSONObj() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+ ASSERT_EQUALS( "b", obj[ "r" ].embeddedObject()[ "a" ].str() );
+ }
+ private:
+ static const char *ns() { return "unittest.jstests.informaldbref"; }
+ };
+
class BinDataType {
public:
@@ -690,7 +765,7 @@ namespace JSTests {
Scope * s = globalScriptEngine->newScope();
s->localConnect( "asd" );
const char * foo = "asdas\0asdasd";
-
+ const char * base64 = "YXNkYXMAYXNkYXNk";
BSONObj in;
{
@@ -706,13 +781,30 @@ namespace JSTests {
BSONObj out = s->getObject( "y" );
ASSERT_EQUALS( BinData , out["c"].type() );
- //blah( "in " , in["b"] );
- //blah( "out" , out["c"] );
+// pp( "in " , in["b"] );
+// pp( "out" , out["c"] );
ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
// check that BinData js class is utilized
- s->invokeSafe( "q = tojson( x.b );", BSONObj() );
- ASSERT_EQUALS( "BinData", s->getString( "q" ).substr( 0, 7 ) );
+ s->invokeSafe( "q = x.b.toString();", BSONObj() );
+ stringstream expected;
+ expected << "BinData( type: " << ByteArray << ", base64: \"" << base64 << "\" )";
+ ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
+
+ stringstream scriptBuilder;
+ scriptBuilder << "z = { c : new BinData( " << ByteArray << ", \"" << base64 << "\" ) };";
+ string script = scriptBuilder.str();
+ s->invokeSafe( script.c_str(), BSONObj() );
+ out = s->getObject( "z" );
+// pp( "out" , out["c"] );
+ ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
+
+ s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", BSONObj() );
+ out = s->getObject( "a" );
+ int len = -1;
+ out[ "f" ].binData( len );
+ ASSERT_EQUALS( 0, len );
+ ASSERT_EQUALS( 128, out[ "f" ].binDataType() );
delete s;
}
@@ -732,6 +824,28 @@ namespace JSTests {
}
};
+ class Speed1 {
+ public:
+ void run(){
+ BSONObj start = BSON( "x" << 5 );
+ BSONObj empty;
+
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ ScriptingFunction f = s->createFunction( "return this.x + 6;" );
+ s->setThis( &start );
+
+ Timer t;
+ double n = 0;
+ for ( ; n < 100000; n++ ){
+ s->invoke( f , empty );
+ ASSERT_EQUALS( 11 , s->getNumber( "return" ) );
+ }
+ cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
+ }
+ };
+
class All : public Suite {
public:
All() : Suite( "js" ) {
@@ -752,15 +866,19 @@ namespace JSTests {
add< OtherJSTypes >();
add< SpecialDBTypes >();
add< TypeConservation >();
+ add< NumberLong >();
add< WeirdObjects >();
add< Utf8Check >();
add< LongUtf8String >();
add< CodeTests >();
add< DBRefTest >();
+ add< InformalDBRef >();
add< BinDataType >();
-
+
add< VarTests >();
+
+ add< Speed1 >();
}
} myall;
diff --git a/dbtests/namespacetests.cpp b/dbtests/namespacetests.cpp
index c820ca6..205c5d2 100644
--- a/dbtests/namespacetests.cpp
+++ b/dbtests/namespacetests.cpp
@@ -30,9 +30,9 @@ namespace NamespaceTests {
namespace IndexDetailsTests {
class Base {
dblock lk;
+ Client::Context _context;
public:
- Base() {
- setClient( ns() );
+ Base() : _context(ns()){
}
virtual ~Base() {
if ( id_.info.isNull() )
@@ -571,9 +571,11 @@ namespace NamespaceTests {
namespace NamespaceDetailsTests {
class Base {
+ const char *ns_;
dblock lk;
+ Client::Context _context;
public:
- Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) {}
+ Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
virtual ~Base() {
if ( !nsd() )
return;
@@ -585,7 +587,6 @@ namespace NamespaceTests {
protected:
void create() {
dblock lk;
- setClient( ns() );
string err;
ASSERT( userCreateNS( ns(), fromjson( spec() ), err, false ) );
}
@@ -627,8 +628,6 @@ namespace NamespaceTests {
b.append( "a", as );
return b.obj();
}
- private:
- const char *ns_;
};
class Create : public Base {
diff --git a/dbtests/pairingtests.cpp b/dbtests/pairingtests.cpp
index b3e772b..e832310 100644
--- a/dbtests/pairingtests.cpp
+++ b/dbtests/pairingtests.cpp
@@ -144,7 +144,7 @@ namespace PairingTests {
ReplPair rp( "a", "b" );
rp.setMaster( ReplPair::State_CantArb, "foo" );
ASSERT( rp.state == ReplPair::State_CantArb );
- ASSERT_EQUALS( "foo", rp.info );
+ ASSERT_EQUALS( rp.info , "foo" );
rp.setMaster( ReplPair::State_Confused, "foo" );
ASSERT( rp.state == ReplPair::State_Confused );
}
diff --git a/dbtests/pdfiletests.cpp b/dbtests/pdfiletests.cpp
index 17659c0..fbacf8b 100644
--- a/dbtests/pdfiletests.cpp
+++ b/dbtests/pdfiletests.cpp
@@ -31,8 +31,7 @@ namespace PdfileTests {
class Base {
public:
- Base() {
- setClient( ns() );
+ Base() : _context( ns() ){
}
virtual ~Base() {
if ( !nsd() )
@@ -99,6 +98,7 @@ namespace PdfileTests {
}
private:
dblock lk_;
+ Client::Context _context;
};
class Empty : public Base {
@@ -269,8 +269,7 @@ namespace PdfileTests {
namespace Insert {
class Base {
public:
- Base() {
- setClient( ns() );
+ Base() : _context( ns() ){
}
virtual ~Base() {
if ( !nsd() )
@@ -287,6 +286,7 @@ namespace PdfileTests {
}
private:
dblock lk_;
+ Client::Context _context;
};
class UpdateDate : public Base {
diff --git a/dbtests/perf/perftest.cpp b/dbtests/perf/perftest.cpp
index 6fe9d6a..02b2fad 100644
--- a/dbtests/perf/perftest.cpp
+++ b/dbtests/perf/perftest.cpp
@@ -623,7 +623,7 @@ namespace Plan {
client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
}
lk_.reset( new dblock );
- setClient( ns_.c_str() );
+ Client::Context ctx( ns_ );
hint_ = BSON( "hint" << BSON( "a" << 1 ) );
hintElt_ = hint_.firstElement();
}
@@ -646,9 +646,9 @@ namespace Plan {
client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
}
lk_.reset( new dblock );
- setClient( ns_.c_str() );
}
void run() {
+ Client::Context ctx( ns_ );
for( int i = 0; i < 10000; ++i )
QueryPlanSet s( ns_.c_str(), BSONObj(), BSON( "a" << 1 ) );
}
@@ -665,9 +665,9 @@ namespace Plan {
client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
}
lk_.reset( new dblock );
- setClient( ns_.c_str() );
}
void run() {
+ Client::Context ctx( ns_.c_str() );
for( int i = 0; i < 10000; ++i )
QueryPlanSet s( ns_.c_str(), BSON( "a" << 1 ), BSONObj() );
}
diff --git a/dbtests/queryoptimizertests.cpp b/dbtests/queryoptimizertests.cpp
index c9465f3..d757f1a 100644
--- a/dbtests/queryoptimizertests.cpp
+++ b/dbtests/queryoptimizertests.cpp
@@ -30,7 +30,8 @@
namespace mongo {
extern BSONObj id_obj;
auto_ptr< QueryResult > runQuery(Message& m, QueryMessage& q ){
- CurOp op;
+ CurOp op( &(cc()) );
+ op.ensureStarted();
return runQuery( m , q , op );
}
} // namespace mongo
@@ -54,7 +55,6 @@ namespace QueryOptimizerTests {
virtual bool lowerInclusive() { return true; }
virtual BSONElement upper() { return maxKey.firstElement(); }
virtual bool upperInclusive() { return true; }
- private:
static void checkElt( BSONElement expected, BSONElement actual ) {
if ( expected.woCompare( actual, false ) ) {
stringstream ss;
@@ -143,7 +143,17 @@ namespace QueryOptimizerTests {
}
};
- class Regex : public Base {
+ struct RegexBase : Base {
+ void run() { // need to look only at the first interval
+ FieldRangeSet s( "ns", query() );
+ checkElt( lower(), s.range( "a" ).intervals()[0].lower_.bound_ );
+ checkElt( upper(), s.range( "a" ).intervals()[0].upper_.bound_ );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).intervals()[0].lower_.inclusive_ );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).intervals()[0].upper_.inclusive_ );
+ }
+ };
+
+ class Regex : public RegexBase {
public:
Regex() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
virtual BSONObj query() {
@@ -157,7 +167,7 @@ namespace QueryOptimizerTests {
BSONObj o1_, o2_;
};
- class RegexObj : public Base {
+ class RegexObj : public RegexBase {
public:
RegexObj() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
virtual BSONObj query() { return BSON("a" << BSON("$regex" << "^abc")); }
@@ -167,12 +177,24 @@ namespace QueryOptimizerTests {
BSONObj o1_, o2_;
};
- class UnhelpfulRegex : public Base {
+ class UnhelpfulRegex : public RegexBase {
+ public:
+ UnhelpfulRegex() {
+ BSONObjBuilder b;
+ b.appendMinForType("lower", String);
+ b.appendMaxForType("upper", String);
+ limits = b.obj();
+ }
+
virtual BSONObj query() {
BSONObjBuilder b;
b.appendRegex( "a", "abc" );
return b.obj();
}
+ virtual BSONElement lower() { return limits["lower"]; }
+ virtual BSONElement upper() { return limits["upper"]; }
+ virtual bool upperInclusive() { return false; }
+ BSONObj limits;
};
class In : public Base {
@@ -316,8 +338,7 @@ namespace QueryOptimizerTests {
namespace QueryPlanTests {
class Base {
public:
- Base() : indexNum_( 0 ) {
- setClient( ns() );
+ Base() : _ctx( ns() ) , indexNum_( 0 ) {
string err;
userCreateNS( ns(), BSONObj(), err, false );
}
@@ -357,6 +378,7 @@ namespace QueryOptimizerTests {
}
private:
dblock lk_;
+ Client::Context _ctx;
int indexNum_;
static DBDirectClient client_;
};
@@ -595,8 +617,7 @@ namespace QueryOptimizerTests {
namespace QueryPlanSetTests {
class Base {
public:
- Base() {
- setClient( ns() );
+ Base() : _context( ns() ){
string err;
userCreateNS( ns(), BSONObj(), err, false );
}
@@ -625,6 +646,7 @@ namespace QueryOptimizerTests {
static NamespaceDetails *nsd() { return nsdetails( ns() ); }
private:
dblock lk_;
+ Client::Context _context;
};
class NoIndexes : public Base {
diff --git a/dbtests/querytests.cpp b/dbtests/querytests.cpp
index 4681bf0..f175543 100644
--- a/dbtests/querytests.cpp
+++ b/dbtests/querytests.cpp
@@ -27,14 +27,17 @@
#include "dbtests.h"
+namespace mongo {
+ extern int __findingStartInitialTimeout;
+}
+
namespace QueryTests {
class Base {
dblock lk;
+ Client::Context _context;
public:
- Base() {
- dblock lk;
- setClient( ns() );
+ Base() : _context( ns() ){
addIndex( fromjson( "{\"a\":1}" ) );
}
~Base() {
@@ -186,6 +189,31 @@ namespace QueryTests {
}
};
+ class PositiveLimit : public ClientBase {
+ public:
+ const char* ns;
+ PositiveLimit() : ns("unittests.querytests.PositiveLimit") {}
+ ~PositiveLimit() {
+ client().dropCollection( ns );
+ }
+
+ void testLimit(int limit){
+ ASSERT_EQUALS(client().query( ns, BSONObj(), limit )->itcount(), limit);
+ }
+ void run() {
+ for(int i=0; i<1000; i++)
+ insert( ns, BSON( GENOID << "i" << i ) );
+
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1 )->itcount(), 1);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 10 )->itcount(), 10);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 101 )->itcount(), 101);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 999 )->itcount(), 999);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1000 )->itcount(), 1000);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1001 )->itcount(), 1000);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 0 )->itcount(), 1000);
+ }
+ };
+
class ReturnOneOfManyAndTail : public ClientBase {
public:
~ReturnOneOfManyAndTail() {
@@ -193,6 +221,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.ReturnOneOfManyAndTail";
+ client().createCollection( ns, 0, true );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
insert( ns, BSON( "a" << 2 ) );
@@ -211,6 +240,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.TailNotAtEnd";
+ client().createCollection( ns, 0, true );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
insert( ns, BSON( "a" << 2 ) );
@@ -235,9 +265,14 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.EmptyTail";
- ASSERT_EQUALS( 0, client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable )->getCursorId() );
+ client().createCollection( ns, 0, true );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ ASSERT( c->isDead() );
insert( ns, BSON( "a" << 0 ) );
- ASSERT( 0 != client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable )->getCursorId() );
+ c = client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT( 0 != c->getCursorId() );
+ ASSERT( !c->isDead() );
}
};
@@ -248,14 +283,15 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.TailableDelete";
+ client().createCollection( ns, 0, true, 2 );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
c->next();
c->next();
ASSERT( !c->more() );
- client().remove( ns, QUERY( "a" << 1 ) );
insert( ns, BSON( "a" << 2 ) );
+ insert( ns, BSON( "a" << 3 ) );
ASSERT( !c->more() );
ASSERT_EQUALS( 0, c->getCursorId() );
}
@@ -268,6 +304,7 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.TailableInsertDelete";
+ client().createCollection( ns, 0, true );
insert( ns, BSON( "a" << 0 ) );
insert( ns, BSON( "a" << 1 ) );
auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
@@ -282,6 +319,52 @@ namespace QueryTests {
}
};
+ class TailCappedOnly : public ClientBase {
+ public:
+ ~TailCappedOnly() {
+ client().dropCollection( "unittests.querytests.TailCappedOnly" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailCappedOnly";
+ client().insert( ns, BSONObj() );
+ auto_ptr< DBClientCursor > c = client().query( ns, BSONObj(), 0, 0, 0, QueryOption_CursorTailable );
+ ASSERT( c->isDead() );
+ ASSERT( !client().getLastError().empty() );
+ }
+ };
+
+ class TailableQueryOnId : public ClientBase {
+ public:
+ ~TailableQueryOnId() {
+ client().dropCollection( "unittests.querytests.TailableQueryOnId" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailableQueryOnId";
+ BSONObj info;
+ client().runCommand( "unittests", BSON( "create" << "querytests.TailableQueryOnId" << "capped" << true << "autoIndexId" << true ), info );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ auto_ptr< DBClientCursor > c1 = client().query( ns, QUERY( "a" << GT << -1 ), 0, 0, 0, QueryOption_CursorTailable );
+ OID id;
+ id.init("000000000000000000000000");
+ auto_ptr< DBClientCursor > c2 = client().query( ns, QUERY( "_id" << GT << id ), 0, 0, 0, QueryOption_CursorTailable );
+ c1->next();
+ c1->next();
+ ASSERT( !c1->more() );
+ c2->next();
+ c2->next();
+ ASSERT( !c2->more() );
+ insert( ns, BSON( "a" << 2 ) );
+ ASSERT( c1->more() );
+ ASSERT_EQUALS( 2, c1->next().getIntField( "a" ) );
+ ASSERT( !c1->more() );
+ ASSERT( c2->more() );
+ ASSERT_EQUALS( 2, c2->next().getIntField( "a" ) ); // SERVER-645
+ ASSERT( !c2->more() );
+ ASSERT( !c2->isDead() );
+ }
+ };
+
class OplogReplayMode : public ClientBase {
public:
~OplogReplayMode() {
@@ -289,13 +372,13 @@ namespace QueryTests {
}
void run() {
const char *ns = "unittests.querytests.OplogReplayMode";
- insert( ns, BSON( "a" << 3 ) );
- insert( ns, BSON( "a" << 0 ) );
- insert( ns, BSON( "a" << 1 ) );
- insert( ns, BSON( "a" << 2 ) );
- auto_ptr< DBClientCursor > c = client().query( ns, QUERY( "a" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
+ insert( ns, BSON( "ts" << 3 ) );
+ insert( ns, BSON( "ts" << 0 ) );
+ insert( ns, BSON( "ts" << 1 ) );
+ insert( ns, BSON( "ts" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
ASSERT( c->more() );
- ASSERT_EQUALS( 2, c->next().getIntField( "a" ) );
+ ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
ASSERT( !c->more() );
}
};
@@ -545,7 +628,7 @@ namespace QueryTests {
void check( const string &hintField ) {
const char *ns = "unittests.querytests.SubobjArr";
ASSERT( client().query( ns, Query( "{'a.b':1}" ).hint( BSON( hintField << 1 ) ) )->more() );
- ASSERT( !client().query( ns, Query( "{'a.b':[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ ASSERT( client().query( ns, Query( "{'a.b':[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
}
};
@@ -608,7 +691,7 @@ namespace QueryTests {
public:
void run() {
dblock lk;
- setClient( "unittests.DirectLocking" );
+ Client::Context ctx( "unittests.DirectLocking" );
client().remove( "a.b", BSONObj() );
ASSERT_EQUALS( "unittests", cc().database()->name );
}
@@ -678,6 +761,7 @@ namespace QueryTests {
CollectionBase( string leaf ){
_ns = "unittests.querytests.";
_ns += leaf;
+ client().dropCollection( ns() );
}
virtual ~CollectionBase(){
@@ -725,7 +809,7 @@ namespace QueryTests {
string err;
writelock lk("");
- setClient( "unittests" );
+ Client::Context ctx( "unittests" );
ASSERT( userCreateNS( ns() , fromjson( "{ capped : true , size : 2000 }" ) , err , false ) );
for ( int i=0; i<100; i++ ){
@@ -770,7 +854,7 @@ namespace QueryTests {
void run(){
writelock lk("");
- setClient( "unittests" );
+ Client::Context ctx( "unittests" );
for ( int i=0; i<50; i++ ){
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -836,7 +920,7 @@ namespace QueryTests {
void run(){
writelock lk("");
- setClient( "unittests" );
+ Client::Context ctx( "unittests" );
for ( int i=0; i<1000; i++ ){
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -860,7 +944,7 @@ namespace QueryTests {
void run(){
writelock lk("");
- setClient( "unittests" );
+ Client::Context ctx( "unittests" );
for ( int i=0; i<1000; i++ ){
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
@@ -870,6 +954,75 @@ namespace QueryTests {
}
};
+ class FindingStart : public CollectionBase {
+ public:
+ FindingStart() : CollectionBase( "findingstart" ), _old( __findingStartInitialTimeout ) {
+ __findingStartInitialTimeout = 0;
+ }
+ ~FindingStart() {
+ __findingStartInitialTimeout = _old;
+ }
+
+ void run() {
+ BSONObj info;
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "size" << 1000 << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+ int i = 0;
+ for( int oldCount = -1;
+ count() != oldCount;
+ oldCount = count(), client().insert( ns(), BSON( "ts" << i++ ) ) );
+
+ for( int k = 0; k < 5; ++k ) {
+ client().insert( ns(), BSON( "ts" << i++ ) );
+ int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+ for( int j = -1; j < i; ++j ) {
+ auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( ( j > min ? j : min ), c->next()[ "ts" ].numberInt() );
+ }
+ }
+ }
+
+ private:
+ int _old;
+ };
+
+ class WhatsMyUri : public CollectionBase {
+ public:
+ WhatsMyUri() : CollectionBase( "whatsmyuri" ) {}
+ void run() {
+ BSONObj result;
+ client().runCommand( "admin", BSON( "whatsmyuri" << 1 ), result );
+ ASSERT_EQUALS( unknownAddress.toString(), result[ "you" ].str() );
+ }
+ };
+
+ namespace parsedtests {
+ class basic1 {
+ public:
+ void _test( const BSONObj& in ){
+ ParsedQuery q( "a.b" , 5 , 6 , 9 , in , BSONObj() );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , q.getFilter() );
+ }
+ void run(){
+ _test( BSON( "x" << 5 ) );
+ _test( BSON( "query" << BSON( "x" << 5 ) ) );
+ _test( BSON( "$query" << BSON( "x" << 5 ) ) );
+
+ {
+ ParsedQuery q( "a.b" , 5 , 6 , 9 , BSON( "x" << 5 ) , BSONObj() );
+ ASSERT_EQUALS( 6 , q.getNumToReturn() );
+ ASSERT( q.wantMore() );
+ }
+ {
+ ParsedQuery q( "a.b" , 5 , -6 , 9 , BSON( "x" << 5 ) , BSONObj() );
+ ASSERT_EQUALS( 6 , q.getNumToReturn() );
+ ASSERT( ! q.wantMore() );
+ }
+ }
+ };
+ };
+
class All : public Suite {
public:
All() : Suite( "query" ) {
@@ -883,11 +1036,14 @@ namespace QueryTests {
add< CountIndexedRegex >();
add< BoundedKey >();
add< GetMore >();
+ add< PositiveLimit >();
add< ReturnOneOfManyAndTail >();
add< TailNotAtEnd >();
add< EmptyTail >();
add< TailableDelete >();
add< TailableInsertDelete >();
+ add< TailCappedOnly >();
+ add< TailableQueryOnId >();
add< OplogReplayMode >();
add< ArrayId >();
add< UnderscoreNs >();
@@ -912,6 +1068,10 @@ namespace QueryTests {
add< TailableCappedRaceCondition >();
add< HelperTest >();
add< HelperByIdTest >();
+ add< FindingStart >();
+ add< WhatsMyUri >();
+
+ add< parsedtests::basic1 >();
}
} myall;
diff --git a/dbtests/repltests.cpp b/dbtests/repltests.cpp
index d4d97c1..c6ef6c2 100644
--- a/dbtests/repltests.cpp
+++ b/dbtests/repltests.cpp
@@ -37,17 +37,17 @@ namespace ReplTests {
}
class Base {
+ dblock lk;
+ Client::Context _context;
public:
- Base() {
- master = true;
+ Base() : _context( ns() ){
+ replSettings.master = true;
createOplog();
- dblock lk;
- setClient( ns() );
ensureHaveIdIndex( ns() );
}
~Base() {
try {
- master = false;
+ replSettings.master = false;
deleteAll( ns() );
deleteAll( cllNS() );
} catch ( ... ) {
@@ -88,7 +88,7 @@ namespace ReplTests {
int count() const {
int count = 0;
dblock lk;
- setClient( ns() );
+ Client::Context ctx( ns() );
auto_ptr< Cursor > c = theDataFileMgr.findAll( ns() );
for(; c->ok(); c->advance(), ++count ) {
// cout << "obj: " << c->current().toString() << endl;
@@ -97,7 +97,7 @@ namespace ReplTests {
}
static int opCount() {
dblock lk;
- setClient( cllNS() );
+ Client::Context ctx( cllNS() );
int count = 0;
for( auto_ptr< Cursor > c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
++count;
@@ -111,17 +111,21 @@ namespace ReplTests {
}
};
dblock lk;
- setClient( cllNS() );
vector< BSONObj > ops;
- for( auto_ptr< Cursor > c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
- ops.push_back( c->current() );
- setClient( ns() );
- for( vector< BSONObj >::iterator i = ops.begin(); i != ops.end(); ++i )
- Applier::apply( *i );
+ {
+ Client::Context ctx( cllNS() );
+ for( auto_ptr< Cursor > c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
+ ops.push_back( c->current() );
+ }
+ {
+ Client::Context ctx( ns() );
+ for( vector< BSONObj >::iterator i = ops.begin(); i != ops.end(); ++i )
+ Applier::apply( *i );
+ }
}
static void printAll( const char *ns ) {
dblock lk;
- setClient( ns );
+ Client::Context ctx( ns );
auto_ptr< Cursor > c = theDataFileMgr.findAll( ns );
vector< DiskLoc > toDelete;
out() << "all for " << ns << endl;
@@ -132,7 +136,7 @@ namespace ReplTests {
// These deletes don't get logged.
static void deleteAll( const char *ns ) {
dblock lk;
- setClient( ns );
+ Client::Context ctx( ns );
auto_ptr< Cursor > c = theDataFileMgr.findAll( ns );
vector< DiskLoc > toDelete;
for(; c->ok(); c->advance() ) {
@@ -144,7 +148,7 @@ namespace ReplTests {
}
static void insert( const BSONObj &o, bool god = false ) {
dblock lk;
- setClient( ns() );
+ Client::Context ctx( ns() );
theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god );
}
static BSONObj wid( const char *json ) {
@@ -460,6 +464,75 @@ namespace ReplTests {
BSONObj o_, q_, u_, ou_;
};
+ class UpdateInc2 : public Base {
+ public:
+ UpdateInc2() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3},$set:{x:5}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8,x:5}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class IncEmbedded : public Base {
+ public:
+ IncEmbedded() :
+ o_( fromjson( "{'_id':1,a:{b:3},b:{b:1}}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a.b':1,'b.b':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:{b:4},b:{b:2}}" ) )
+ {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class IncCreates : public Base {
+ public:
+ IncCreates() :
+ o_( fromjson( "{'_id':1}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:1}") )
+ {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+
class UpsertInsertIdMod : public Base {
public:
UpsertInsertIdMod() :
@@ -885,7 +958,7 @@ namespace ReplTests {
class DbIdsTest {
public:
void run() {
- setClient( "unittests.repltest.DbIdsTest" );
+ Client::Context ctx( "unittests.repltest.DbIdsTest" );
s_.reset( new DbIds( "local.temp.DbIdsTest" ) );
s_->reset();
@@ -960,7 +1033,7 @@ namespace ReplTests {
class IdTrackerTest {
public:
void run() {
- setClient( "unittests.repltests.IdTrackerTest" );
+ Client::Context ctx( "unittests.repltests.IdTrackerTest" );
ASSERT( s_.inMem() );
s_.reset( 4 * sizeof( BSONObj ) - 1 );
@@ -1016,6 +1089,9 @@ namespace ReplTests {
add< Idempotence::UpsertInsertNoMods >();
add< Idempotence::UpdateSet >();
add< Idempotence::UpdateInc >();
+ add< Idempotence::UpdateInc2 >();
+ add< Idempotence::IncEmbedded >(); // SERVER-716
+ add< Idempotence::IncCreates >(); // SERVER-717
add< Idempotence::UpsertInsertIdMod >();
add< Idempotence::UpsertInsertSet >();
add< Idempotence::UpsertInsertInc >();
diff --git a/dbtests/test.vcproj b/dbtests/test.vcproj
index 4582ef2..002d464 100644
--- a/dbtests/test.vcproj
+++ b/dbtests/test.vcproj
@@ -144,7 +144,7 @@
/>
<Tool
Name="VCLinkerTool"
- AdditionalDependencies="ws2_32.lib"
+ AdditionalDependencies="ws2_32.lib psapi.lib"
LinkIncremental="1"
AdditionalLibraryDirectories="&quot;c:\program files\boost\boost_1_35_0\lib&quot;"
GenerateDebugInformation="true"
@@ -350,36 +350,8 @@
>
</File>
<File
- RelativePath="..\..\js\js\Debug\js.lib"
- >
- <FileConfiguration
- Name="Release|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
- </File>
- <File
- RelativePath="..\..\js\js\Release\js.lib"
+ RelativePath="..\..\js\src\js.lib"
>
- <FileConfiguration
- Name="Debug|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Debug Recstore|Win32"
- ExcludedFromBuild="true"
- >
- <Tool
- Name="VCCustomBuildTool"
- />
- </FileConfiguration>
</File>
<File
RelativePath="..\pcre-7.4\pcrecpp.cc"
@@ -1330,7 +1302,7 @@
>
</File>
<File
- RelativePath="..\client\quorum.cpp"
+ RelativePath="..\client\syncclusterconnection.cpp"
>
</File>
<Filter
@@ -1358,6 +1330,10 @@
>
</File>
<File
+ RelativePath="..\db\cmdline.cpp"
+ >
+ </File>
+ <File
RelativePath="..\db\cmdline.h"
>
</File>
@@ -1398,6 +1374,10 @@
>
</File>
<File
+ RelativePath="..\db\diskloc.h"
+ >
+ </File>
+ <File
RelativePath="..\db\extsort.h"
>
</File>
@@ -1495,6 +1475,10 @@
>
</File>
<File
+ RelativePath="..\db\common.cpp"
+ >
+ </File>
+ <File
RelativePath="..\db\cursor.cpp"
>
</File>
@@ -1515,10 +1499,6 @@
>
</File>
<File
- RelativePath="..\db\dbstats.cpp"
- >
- </File>
- <File
RelativePath="..\db\dbwebserver.cpp"
>
</File>
@@ -1531,6 +1511,10 @@
>
</File>
<File
+ RelativePath="..\db\index_geo2d.cpp"
+ >
+ </File>
+ <File
RelativePath="..\db\instance.cpp"
>
</File>
@@ -1631,10 +1615,6 @@
>
</File>
<File
- RelativePath="..\util\top.cpp"
- >
- </File>
- <File
RelativePath="..\db\update.cpp"
>
</File>
@@ -1836,6 +1816,10 @@
/>
</FileConfiguration>
</File>
+ <File
+ RelativePath="..\scripting\utils.cpp"
+ >
+ </File>
</Filter>
<Filter
Name="dbtests"
@@ -1925,6 +1909,22 @@
>
</File>
</Filter>
+ <Filter
+ Name="stats"
+ >
+ <File
+ RelativePath="..\db\stats\counters.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\db\stats\snapshots.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\db\stats\top.cpp"
+ >
+ </File>
+ </Filter>
</Files>
<Globals>
</Globals>
diff --git a/dbtests/threadedtests.cpp b/dbtests/threadedtests.cpp
index f3ebe39..2ffafba 100644
--- a/dbtests/threadedtests.cpp
+++ b/dbtests/threadedtests.cpp
@@ -18,6 +18,7 @@
*/
#include "stdafx.h"
+#include "../util/atomic_int.h"
#include "../util/mvar.h"
#include "../util/thread_pool.h"
#include <boost/thread.hpp>
@@ -59,18 +60,26 @@ namespace ThreadedTests {
};
// Tested with up to 30k threads
- class IsWrappingIntAtomic : public ThreadedTest<> {
+ class IsAtomicUIntAtomic : public ThreadedTest<> {
static const int iterations = 1000000;
- WrappingInt target;
+ AtomicUInt target;
void subthread(){
for(int i=0; i < iterations; i++){
//target.x++; // verified to fail with this version
- target.atomicIncrement();
+ target++;
}
}
void validate(){
ASSERT_EQUALS(target.x , unsigned(nthreads * iterations));
+
+ AtomicUInt u;
+ ASSERT_EQUALS(0u, u);
+ ASSERT_EQUALS(0u, u++);
+ ASSERT_EQUALS(2u, ++u);
+ ASSERT_EQUALS(2u, u--);
+ ASSERT_EQUALS(0u, --u);
+ ASSERT_EQUALS(0u, u);
}
};
@@ -99,10 +108,10 @@ namespace ThreadedTests {
static const int iterations = 10000;
static const int nThreads = 8;
- WrappingInt counter;
+ AtomicUInt counter;
void increment(int n){
for (int i=0; i<n; i++){
- counter.atomicIncrement();
+ counter++;
}
}
@@ -111,13 +120,12 @@ namespace ThreadedTests {
ThreadPool tp(nThreads);
for (int i=0; i < iterations; i++){
- tp.schedule(&WrappingInt::atomicIncrement, &counter);
tp.schedule(&ThreadPoolTest::increment, this, 2);
}
tp.join();
- ASSERT(counter == (unsigned)(iterations * 3));
+ ASSERT(counter == (unsigned)(iterations * 2));
}
};
@@ -127,7 +135,7 @@ namespace ThreadedTests {
}
void setupTests(){
- add< IsWrappingIntAtomic >();
+ add< IsAtomicUIntAtomic >();
add< MVarTest >();
add< ThreadPoolTest >();
}
diff --git a/dbtests/updatetests.cpp b/dbtests/updatetests.cpp
index a9c4b1e..09caae6 100644
--- a/dbtests/updatetests.cpp
+++ b/dbtests/updatetests.cpp
@@ -525,8 +525,7 @@ namespace UpdateTests {
public:
void run(){
BSONObj b = BSON( "$inc" << BSON( "x" << 1 << "a.b" << 1 ) );
- ModSet m;
- m.getMods( b );
+ ModSet m(b);
ASSERT( m.haveModForField( "x" ) );
ASSERT( m.haveModForField( "a.b" ) );
@@ -551,10 +550,9 @@ namespace UpdateTests {
void test( BSONObj morig , BSONObj in , BSONObj wanted ){
BSONObj m = morig.copy();
- ModSet set;
- set.getMods( m );
+ ModSet set(m);
- BSONObj out = set.createNewFromMods( in );
+ BSONObj out = set.prepare(in)->createNewFromMods();
ASSERT_EQUALS( wanted , out );
}
};
@@ -663,6 +661,22 @@ namespace UpdateTests {
}
};
+
+ class inc2 : public SingleTest {
+ virtual BSONObj initial(){
+ return BSON( "_id" << 1 << "x" << 1 );
+ }
+ virtual BSONObj mod(){
+ return BSON( "$inc" << BSON( "x" << 2.5 ) );
+ }
+ virtual BSONObj after(){
+ return BSON( "_id" << 1 << "x" << 3.5 );
+ }
+ virtual const char * ns(){
+ return "unittests.inc2";
+ }
+
+ };
class bit1 : public Base {
const char * ns(){
@@ -760,6 +774,7 @@ namespace UpdateTests {
add< ModSetTests::push1 >();
add< basic::inc1 >();
+ add< basic::inc2 >();
add< basic::bit1 >();
add< basic::unset >();
add< basic::setswitchint >();
diff --git a/debian/changelog b/debian/changelog
index 3c6963c..d99fb63 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,33 @@
+mongodb (1.4.0) unstable; urgency=low
+
+ * stable release
+
+ -- Richard Kreuter <richard@10gen.com> Wed, 22 Mar 2010 16:56:28 -0500
+
+mongodb (1.3.5) unstable; urgency=low
+
+ * bug fixes
+
+ -- Richard Kreuter <richard@10gen.com> Wed, 22 Mar 2010 16:56:28 -0500
+
+mongodb (1.3.4) unstable; urgency=low
+
+ * bug fixes
+
+ -- Richard Kreuter <richard@10gen.com> Wed, 17 Mar 2010 16:56:28 -0500
+
+mongodb (1.3.3) unstable; urgency=low
+
+ * geo
+
+ -- Richard Kreuter <richard@10gen.com> Fri, 05 Feb 2010 16:56:28 -0500
+
+mongodb (1.3.2) unstable; urgency=low
+
+ * munged debian files
+
+ -- Richard Kreuter <richard@10gen.com> Fri, 05 Feb 2010 16:56:28 -0500
+
mongodb (1.3.1) unstable; urgency=low
* Initial release
diff --git a/debian/control b/debian/control
index 6616a8b..2aef1c3 100644
--- a/debian/control
+++ b/debian/control
@@ -1,14 +1,14 @@
Source: mongodb
Section: devel
Priority: optional
-Maintainer: Kristina Chodorow <kristina@10gen.com>
-Build-Depends: debhelper (>= 7), libboost-dev, libpcre3, libpcre3-dev, scons, xulrunner-1.9-dev | xulrunner-1.9.1-dev, libboost-thread-dev, libboost-filesystem-dev, libboost-program-options-dev, libboost-date-time-dev
+Maintainer: Richard Kreuter <richard@10gen.com>
+Build-Depends: debhelper (>= 7), libpcre3, libpcre3-dev, scons, xulrunner-dev, libboost1.35-dev | libboost1.37-dev | libboost1.38-dev | libboost1.40-dev, libboost-thread1.35-dev | libboost-thread1.37-dev | libboost-thread1.38-dev | libboost-thread1.40-dev, libboost-filesystem1.35-dev | libboost-filesystem1.37-dev | libboost-filesystem1.38-dev | libboost-filesystem1.40-dev, libboost-program-options1.35-dev | libboost-program-options1.37-dev | libboost-program-options1.38-dev | libboost-program-options1.40-dev, libboost-date-time1.35-dev | libboost-date-time1.37-dev | libboost-date-time1.38-dev | libboost-date-time1.40-dev, libpcap-dev, libreadline-dev
Standards-Version: 3.8.0
Homepage: http://www.mongodb.org
Package: mongodb
Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, xulrunner-1.9-dev
+Depends: ${shlibs:Depends}, ${misc:Depends}, xulrunner-dev
Description: An object/document-oriented database
MongoDB is a high-performance, open source, schema-free
document-oriented data store that's easy to deploy, manage
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/init.d b/debian/init.d
index b4eedf0..47a10a0 100644
--- a/debian/init.d
+++ b/debian/init.d
@@ -21,8 +21,8 @@
#
### BEGIN INIT INFO
# Provides: mongodb
-# Required-Start: $network $local_fs
-# Required-Stop:
+# Required-Start: $network $local_fs $remote_fs
+# Required-Stop: $network $local_fs $remote_fs
# Should-Start: $named
# Should-Stop:
# Default-Start: 2 3 4 5
@@ -48,50 +48,46 @@
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/usr/bin/mongod
-DATA=/var/lib/mongodb
-NAME=MongoDB
DESC=database
+# Default settings. These can be overridden in /etc/default/$NAME
+NAME=mongodb
+CONF=/etc/mongodb.conf
+DATA=/var/lib/mongodb
+LOGDIR=/var/log/mongodb
+PIDFILE=/var/run/$NAME.pid
+LOGFILE=$LOGDIR/$NAME.log # Server logfile
+ENABLE_MONGODB=yes
+
+# Include mongodb defaults if available
+if [ -f /etc/default/$NAME ] ; then
+ . /etc/default/$NAME
+fi
+
if test ! -x $DAEMON; then
echo "Could not find $DAEMON"
exit 0
fi
+if test "x$ENABLE_MONGODB" != "xyes"; then
+ exit 0
+fi
+
if test ! -x $DATA; then
mkdir $DATA || exit 0
fi
. /lib/lsb/init-functions
-LOGDIR=/var/log/mongodb
-PIDFILE=/var/run/$NAME.pid
+STARTTIME=1
DIETIME=10 # Time to wait for the server to die, in seconds
# If this value is set too low you might not
# let some servers to die gracefully and
# 'restart' will not work
-LOGFILE=$LOGDIR/$NAME.log # Server logfile
-DAEMON_OPTS="--dbpath $DATA --logpath $LOGFILE run"
-
-
-# Include mongodb defaults if available
-if [ -f /etc/default/$NAME ] ; then
- . /etc/default/$NAME
-fi
-
-DAEMONUSER=mongodb
-# Check that the user exists (if we set a user)
-# Does the user exist?
-if [ -n "$DAEMONUSER" ] ; then
- if getent passwd | grep -q "^$DAEMONUSER:"; then
- # Obtain the uid and gid
- DAEMONUID=`getent passwd |grep "^$DAEMONUSER:" | awk -F : '{print $3}'`
- DAEMONGID=`getent passwd |grep "^$DAEMONUSER:" | awk -F : '{print $4}'`
- else
- log_failure_msg "The user $DAEMONUSER, required to run $NAME does not exist."
- exit 1
- fi
-fi
+DAEMONUSER=${DAEMONUSER:-mongodb}
+DAEMON_OPTS=${DAEMON_OPTS:-"--dbpath $DATA --logpath $LOGFILE run"}
+DAEMON_OPTS="$DAEMON_OPTS --config $CONF"
set -e
@@ -121,45 +117,22 @@ running() {
start_server() {
# Start the process using the wrapper
- if [ -z "$DAEMONUSER" ] ; then
- start-stop-daemon --background --start --quiet --pidfile $PIDFILE \
- --make-pidfile --exec $DAEMON -- $DAEMON_OPTS
- errcode=$?
- else
-# if we are using a daemonuser then change the user id
start-stop-daemon --background --start --quiet --pidfile $PIDFILE \
--make-pidfile --chuid $DAEMONUSER \
--exec $DAEMON -- $DAEMON_OPTS
errcode=$?
- fi
return $errcode
}
stop_server() {
# Stop the process using the wrapper
- if [ -z "$DAEMONUSER" ] ; then
- start-stop-daemon --stop --quiet --pidfile $PIDFILE
- rm $PIDFILE
- errcode=$?
- else
-# if we are using a daemonuser then look for process that match
start-stop-daemon --stop --quiet --pidfile $PIDFILE \
--user $DAEMONUSER \
--exec $DAEMON
errcode=$?
- fi
-
return $errcode
}
-reload_server() {
- [ ! -f "$PIDFILE" ] && return 1
- pid=pidofproc $PIDFILE # This is the daemon's pid
- # Send a SIGHUP
- kill -USR1 $pid
- return $?
-}
-
force_stop() {
# Force the process to die killing it manually
[ ! -e "$PIDFILE" ] && return
@@ -182,7 +155,7 @@ force_stop() {
case "$1" in
start)
- log_daemon_msg "Starting $DESC $NAME"
+ log_daemon_msg "Starting $DESC" "$NAME"
# Check if it's running first
if running ; then
log_progress_msg "apparently already running"
@@ -254,35 +227,11 @@ case "$1" in
exit 1
fi
;;
- # Use this if the daemon cannot reload
+ # MongoDB can't reload its configuration.
reload)
log_warning_msg "Reloading $NAME daemon: not implemented, as the daemon"
log_warning_msg "cannot re-read the config file (use restart)."
;;
- # And this if it cann
- #reload)
- #
- # If the daemon can reload its config files on the fly
- # for example by sending it SIGHUP, do it here.
- #
- # If the daemon responds to changes in its config file
- # directly anyway, make this a do-nothing entry.
- #
- # log_daemon_msg "Reloading $DESC configuration files" "$NAME"
- # if running ; then
- # reload_server
- # if ! running ; then
- # Process died after we tried to reload
- # log_progress_msg "died on reload"
- # log_end_msg 1
- # exit 1
- # fi
- # else
- # log_progress_msg "server is not running"
- # log_end_msg 1
- # exit 1
- # fi
- #;;
*)
N=/etc/init.d/$NAME
diff --git a/debian/lintian-overrides b/debian/lintian-overrides
new file mode 100644
index 0000000..c843e9e
--- /dev/null
+++ b/debian/lintian-overrides
@@ -0,0 +1,11 @@
+# Agreed with upstream that redefining rpath is necessary, as xulrunner used to
+# change its API without changing the so-name
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongo /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongod /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongodump /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongoexport /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongofiles /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongoimport /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongorestore /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongos /usr/lib64/xulrunner-1.9.1
+mongodb: binary-or-shlib-defines-rpath ./usr/bin/mongosniff /usr/lib64/xulrunner-1.9.1
diff --git a/debian/mongod.1 b/debian/mongod.1
new file mode 100644
index 0000000..7b86359
--- /dev/null
+++ b/debian/mongod.1
@@ -0,0 +1,16 @@
+.\" Documentation for the MongoDB shell
+.TH MONGOD "1" "June 2009" "10gen" "Mongo Database"
+.SH "NAME"
+mongod \- the Mongo Daemon
+.SH "SYNOPSIS"
+.SH "DESCRIPTION"
+.PP
+\fBmongod\fR
+is a core MongoDB daemon. You are not supposed to call it directly; please refer to the wiki if necessary.
+.SH "COPYRIGHT"
+.PP
+Copyright 2007\-2009 10gen
+.SH "SEE ALSO"
+For more information, please refer to the MongoDB wiki, available at http://www.mongodb.org.
+.SH "AUTHOR"
+Antonin Kral
diff --git a/debian/mongodb.conf b/debian/mongodb.conf
new file mode 100644
index 0000000..6a5de05
--- /dev/null
+++ b/debian/mongodb.conf
@@ -0,0 +1,95 @@
+# mongodb.conf
+
+# Where to store the data.
+
+# Note: if you run mongodb as a non-root user (recommended) you may
+# need to create and set permissions for this directory manually,
+# e.g., if the parent directory isn't mutable by the mongodb user.
+dbpath=/var/lib/mongodb
+
+#where to log
+logpath=/var/log/mongodb/mongodb.log
+
+logappend=true
+
+#port = 27017
+
+
+
+# Enables periodic logging of CPU utilization and I/O wait
+#cpu = true
+
+# Turn on/off security. Off is currently the default
+#noauth = true
+#auth = true
+
+# Verbose logging output.
+#verbose = true
+
+# Inspect all client data for validity on receipt (useful for
+# developing drivers)
+#objcheck = true
+
+# Enable db quota management
+#quota = true
+
+# Set oplogging level where n is
+# 0=off (default)
+# 1=W
+# 2=R
+# 3=both
+# 7=W+some reads
+#oplog = 0
+
+# Diagnostic/debugging option
+#nocursors = true
+
+# Ignore query hints
+#nohints = true
+
+# Disable the HTTP interface (Defaults to localhost:27018).
+#nohttpinterface = true
+
+# Turns off server-side scripting. This will result in greatly limited
+# functionality
+#noscripting = true
+
+# Turns off table scans. Any query that would do a table scan fails.
+#notablescan = true
+
+# Disable data file preallocation.
+#noprealloc = true
+
+# Specify .ns file size for new databases.
+# nssize = <size>
+
+# Account token for Mongo monitoring server.
+#mms-token = <token>
+
+# Server name for Mongo monitoring server.
+#mms-name = <server-name>
+
+# Ping interval for Mongo monitoring server.
+#mms-interval = <seconds>
+
+# Replication Options
+
+# in replicated mongo databases, specify here whether this is a slave or master
+#slave = true
+#source = master.example.com
+# Slave only: specify a single database to replicate
+#only = master.example.com
+# or
+#master = true
+#source = slave.example.com
+
+# Address of a server to pair with.
+#pairwith = <server:port>
+# Address of arbiter server.
+#arbiter = <server:port>
+# Automatically resync if slave data is stale
+#autoresync
+# Custom size for replication operation log.
+#oplogSize = <MB>
+# Size limit for in-memory storage of op ids.
+#opIdMem = <bytes>
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..db061d5
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/mongoimport.1 b/debian/mongoimport.1
new file mode 100644
index 0000000..4b6c3de
--- /dev/null
+++ b/debian/mongoimport.1
@@ -0,0 +1,63 @@
+.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
+.TH MONGOIMPORT "1" "January 2010" "10gen" "Mongo Database"
+.SH "NAME"
+mongoimport \- the Mongo import tool
+.SH "SYNOPSIS"
+\fBmongoimport [\fIOPTIONS\fR]\fR
+.SH "DESCRIPTION"
+.PP
+\fBmongoimport\fR
+is a tool to import data into a MongoDB collection from JSON, CSV, or TSV. A list of fields to import can be given.
+.\".SH "EXAMPLES"
+.\".TP
+.\".B mongoimport -d test -c test1 --csv -f "name,num"
+.\"import documents from test.test1 in CSV format
+.SS "OPTIONS"
+.TP
+\fB\-\-help\fR
+show usage information
+.TP
+.B \-h, \-\-host HOST
+server to connect to (default HOST=localhost)
+.TP
+.B \-d, \-\-db DATABASE
+database to use
+.TP
+.B \-c, \-\-c COLLECTION
+collection to use (some commands)
+.TP
+.B \-\-dbpath PATH
+directly access mongod data files in this path,
+instead of connecting to a mongod instance
+.TP
+.B \-v, \-\-verbose
+be more verbose (include multiple times for more
+verbosity e.g. \fB\-vvvvv\fR)
+.TP
+.B \-f, \-\-fields NAMES
+comma separated list of field names, e.g. \fB\-f\fR name,age
+.TP
+.B \-\-fieldFile FILE
+file with field names \- 1 per line
+.TP
+.B \-\-ignoreBlanks
+if given, empty fields in csv and tsv will be ignored
+.TP
+.B \-\-type TYPE
+type of file to import. default: json (json,csv,tsv)
+.TP
+.B \-\-file FILE
+file to import from; if not specified stdin is used
+.TP
+.B \-\-drop
+drop collection first
+.TP
+.B \-\-headerline
+CSV,TSV only \- use first line as headers
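+.\" A usage sketch (editor's illustration; the database, collection, and file
+.\" names are hypothetical, the flags are the ones documented above):
+.\"   mongoimport -d test -c people --type csv --headerline --file people.csv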
+.SH "COPYRIGHT"
+.PP
+Copyright 2007\-2009 10gen
+.SH "SEE ALSO"
+For more information, please refer to the MongoDB wiki, available at http://www.mongodb.org.
+.SH "AUTHOR"
+Kristina Chodorow
diff --git a/debian/mongoimportjson.1 b/debian/mongoimportjson.1
deleted file mode 100644
index 5f3f450..0000000
--- a/debian/mongoimportjson.1
+++ /dev/null
@@ -1,45 +0,0 @@
-.\" Documentation for the MongoDB shell
-.TH MONGOIMPORTJSON "1" "June 2009" "10gen" "Mongo Database"
-.SH "NAME"
-mongoimportjson \- the Mongo import tool
-.SH "SYNOPSIS"
-\fBmongoimportjson [\fIOPTIONS\fR]\fR
-.SH "DESCRIPTION"
-.PP
-\fBmongoimportjson\fR
-is a tool to import JSON documents into MongoDB. This utility takes a single file that contains one JSON string per line and inserts it. A databaase and collection must be specified.
-.SH "OPTIONS"
-.TP
-.B \-\-help
-show usage information
-.TP
-.B \-h, \-\-host HOST
-server to connect to (default HOST=localhost)
-.TP
-.B \-d, \-\-db DATABASE
-database to use
-.TP
-.B \-c, \-\-c COLLECTION
-collection to use
-.TP
-.B \-\-file FILE
-file from which to import
-.TP
-.B \-\-dbpath PATH
-directly access mongod data files in this path, instead of connecting to a mongod instance
-.TP
-.B \-\-idbefore
-create id index before importing
-.TP
-.B \-\-id
-create id index after importing (recommended)
-.TP
-.B \-\-drop
-drop collection before importing
-.SH "COPYRIGHT"
-.PP
-Copyright 2007\-2009 10gen
-.SH "SEE ALSO"
-For more information, please refer to the MongoDB wiki, available at http://www.mongodb.org.
-.SH "AUTHOR"
-Kristina Chodorow
diff --git a/debian/mongosniff.1 b/debian/mongosniff.1
new file mode 100644
index 0000000..b6f1063
--- /dev/null
+++ b/debian/mongosniff.1
@@ -0,0 +1,30 @@
+.TH MONGOSNIFF "1" "Jan 2010" "10gen" "Mongo Database"
+.SH "NAME"
+mongosniff \- the Mongo packet analyzer
+.SH "SYNOPSIS"
+\fBmongosniff [\fIOPTIONS\fR] [\fI<port0> <port1> ...\fR]
+.SH "DESCRIPTION"
+.PP
+\fBmongosniff\fR
+is an analyzer tool for inspecting packets coming to your database.
+.PP
+.SH "OPTIONS"
+.TP
+.B \-\-forward
+Forward all parsed request messages to mongod instance at specified host:port
+.TP
+.B \-\-source
+Source of traffic to sniff, either a network interface or a file containing previously captured packets, in pcap format. If no source is specified, mongosniff will attempt to sniff from one of the machine's network interfaces.
+.TP
+.B \-\-help
+print a short help message.
+.TP
+.B <port0>
+These parameters are used to filter sniffing. By default, only port 27017 is sniffed.
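+.\" A usage sketch (editor's illustration; the host and ports are
+.\" hypothetical, the flags are the ones documented above):
+.\"   mongosniff --forward localhost:27018 27017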
+.SH "COPYRIGHT"
+.PP
+Copyright 2007\-2009 10gen
+.SH "SEE ALSO"
+For more information, please refer to the MongoDB wiki, available at http://www.mongodb.org.
+.SH "AUTHOR"
+Antonin Kral
diff --git a/debian/mongostat.1 b/debian/mongostat.1
new file mode 100644
index 0000000..5828104
--- /dev/null
+++ b/debian/mongostat.1
@@ -0,0 +1,39 @@
+.\" Documentation for the MongoDB shell
+.TH MONGOSTAT "1" "March 2010" "10gen" "Mongo Database"
+.SH "NAME"
+mongostat \- view statistics on a running mongod instance
+.SH "SYNOPSIS"
+\fBmongostat [\fIOPTIONS\fR]
+.SH "DESCRIPTION"
+.PP
+\fBmongostat\fR
+prints statistics on a running mongod instance.
+.SH "OPTIONS"
+.TP
+.B \-\-help
+show usage information
+.TP
+.B \-h, \-\-host HOST
+mongo host to connect to (use "left,right" for pairs)
+\" .TP
+\" .B \-\-port PORT
+\" port to connect to (default PORT=27017)
+.TP
+.B \-d, \-\-db ARG
+db to use
+.TP
+.B \-c, \-\-collection ARG
+collection to use (some commands)
+.TP
+.B \-u, \-\-username USERNAME
+specify user to log in as
+.TP
+.B \-p, \-\-password PASSWORD
+specify password of user (notice there is no space)
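+.\" A usage sketch (editor's illustration; the host and credentials are
+.\" hypothetical, the flags are the ones documented above):
+.\"   mongostat --host localhost -u admin -p secret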
+.SH "COPYRIGHT"
+.PP
+Copyright 2010 10gen
+.SH "SEE ALSO"
+For more information, please refer to the MongoDB wiki, available at http://www.mongodb.org.
+.SH "AUTHOR"
+Eliot Horowitz
diff --git a/debian/postinst b/debian/postinst
index 3745b99..4d0e786 100644
--- a/debian/postinst
+++ b/debian/postinst
@@ -22,18 +22,18 @@ case "$1" in
configure)
# create a mongodb group and user
if ! grep -q mongodb /etc/passwd; then
- adduser --system mongodb
+ adduser --system --no-create-home mongodb
addgroup --system mongodb
adduser mongodb mongodb
fi
- # create db
+ # create db -- note: this should agree with dbpath in mongodb.conf
mkdir -p /var/lib/mongodb
- chown mongodb:mongodb /var/lib/mongodb
+ chown -R mongodb:mongodb /var/lib/mongodb
- # create logdir
+ # create logdir -- note: this should agree with logpath in mongodb.conf
mkdir -p /var/log/mongodb
- chown mongodb:mongodb /var/log/mongodb
+ chown -R mongodb:mongodb /var/log/mongodb
;;
abort-upgrade|abort-remove|abort-deconfigure)
diff --git a/debian/rules b/debian/rules
index c258723..2afdfdb 100644
--- a/debian/rules
+++ b/debian/rules
@@ -26,6 +26,7 @@ build-stamp: configure-stamp
# Add here commands to compile the package.
scons
#docbook-to-man debian/mongodb.sgml > mongodb.1
+ ls debian/*.1 > debian/mongodb.manpages
touch $@
@@ -34,7 +35,10 @@ clean:
dh_testroot
rm -f build-stamp configure-stamp
- scons -c
+ # FIXME: scons freaks out at the presence of target files
+ # under debian/mongodb.
+ #scons -c
+ rm -rf $(CURDIR)/debian/mongodb
rm -f config.log
rm -f mongo
rm -f mongod
@@ -49,16 +53,24 @@ clean:
rm -rf tools/*.o
rm -rf shell/*.o
rm -rf .sconf_temp
- dh_clean
+ rm -f buildscripts/*.pyc
+ rm -f *.pyc
+ rm -f buildinfo.cpp
+ dh_clean debian/files
install: build
dh_testdir
dh_testroot
- dh_clean -k
+ dh_prep
dh_installdirs
scons --prefix=$(CURDIR)/debian/mongodb/usr install
+ mkdir -p $(CURDIR)/debian/mongodb/etc
+ cp $(CURDIR)/debian/mongodb.conf $(CURDIR)/debian/mongodb/etc/mongodb.conf
+ mkdir -p $(CURDIR)/debian/mongodb/usr/share/lintian/overrides/
+ install -m 644 $(CURDIR)/debian/lintian-overrides \
+ $(CURDIR)/debian/mongodb/usr/share/lintian/overrides/mongodb
# Build architecture-independent files here.
binary-indep: build install
diff --git a/debian/ubuntu/mongodb.conf b/debian/ubuntu/mongodb.conf
deleted file mode 100644
index 90a5e44..0000000
--- a/debian/ubuntu/mongodb.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-# Ubuntu upstart file at /etc/init/mongodb.conf
-# Presumes installation of mongodb is in /usr/local/mongodb/
-
-pre-start script
- mkdir -p /var/lib/mongodb/
- mkdir -p /var/log/mongodb/
-end script
-
-start on runlevel [345]
-
-exec /usr/local/mongodb/bin/mongod --config /usr/local/mongodb/mongodb_settings.conf
-
-respawn \ No newline at end of file
diff --git a/debian/ubuntu/mongodb_settings.conf b/debian/ubuntu/mongodb_settings.conf
deleted file mode 100644
index dbb83cb..0000000
--- a/debian/ubuntu/mongodb_settings.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is an example config file for MongoDB to be located at /usr/local/mongodb/mongodb_settings.conf
-# and used by /etc/init/mongodb.conf
-
-logappend = true
-logpath = /var/log/mongodb/mongod.log
-dbpath = /var/lib/mongodb/ \ No newline at end of file
diff --git a/doxygenConfig b/doxygenConfig
index 6580365..dacf258 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.3.1
+PROJECT_NUMBER = 1.4.0
OUTPUT_DIRECTORY = docs
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/array_match1.js b/jstests/array_match1.js
new file mode 100644
index 0000000..f764fb9
--- /dev/null
+++ b/jstests/array_match1.js
@@ -0,0 +1,31 @@
+
+t = db.array_match1
+t.drop();
+
+t.insert( { _id : 1 , a : [ 5 , 5 ] } )
+t.insert( { _id : 2 , a : [ 6 , 6 ] } )
+t.insert( { _id : 3 , a : [ 5 , 5 ] } )
+
+function test( f , m ){
+ var q = {};
+
+ q[f] = [5,5];
+ assert.eq( 2 , t.find( q ).itcount() , m + "1" )
+
+ q[f] = [6,6];
+ assert.eq( 1 , t.find( q ).itcount() , m + "2" )
+}
+
+test( "a" , "A" );
+t.ensureIndex( { a : 1 } )
+test( "a" , "B" );
+
+t.drop();
+
+t.insert( { _id : 1 , a : { b : [ 5 , 5 ] } } )
+t.insert( { _id : 2 , a : { b : [ 6 , 6 ] } } )
+t.insert( { _id : 3 , a : { b : [ 5 , 5 ] } } )
+
+test( "a.b" , "C" );
+t.ensureIndex( { a : 1 } )
+test( "a.b" , "D" );
diff --git a/jstests/arrayfind1.js b/jstests/arrayfind1.js
index 422369e..539fa61 100644
--- a/jstests/arrayfind1.js
+++ b/jstests/arrayfind1.js
@@ -33,6 +33,8 @@ t.find( { "a.x" : 1 } ).count();
t.find( { "a.x" : { $gt : 1 } } ).count();
res = t.find( { "a" : { $elemMatch : { x : { $gt : 2 } } } } ).explain()
-assert( res.cursor.indexOf( "BtreeC" ) == 0 , "C1" );
-assert.eq( 2 , t.find( { a : { $elemMatch : { x : { $gt : 2 } } } } ).count() , "D2" );
+assert( res.cursor.indexOf( "BtreeC" ) == 0 , "D2" );
+assert.eq( 2 , t.find( { a : { $elemMatch : { x : { $gt : 2 } } } } ).count() , "D3" );
+assert.eq( 2 , t.find( { a : { $ne:2, $elemMatch : { x : { $gt : 2 } } } } ).count() , "E1" );
+assert( t.find( { a : { $ne:2, $elemMatch : { x : { $gt : 2 } } } } ).explain().cursor.indexOf( "BtreeC" ) == 0 , "E2" );
diff --git a/jstests/arrayfind2.js b/jstests/arrayfind2.js
new file mode 100644
index 0000000..59bf2b0
--- /dev/null
+++ b/jstests/arrayfind2.js
@@ -0,0 +1,35 @@
+
+t = db.arrayfind2;
+t.drop();
+
+function go( prefix ){
+ assert.eq( 3 , t.count() , prefix + " A1" );
+ assert.eq( 3 , t.find( { a : { $elemMatch : { x : { $gt : 4 } } } } ).count() , prefix + " A2" );
+ assert.eq( 1 , t.find( { a : { $elemMatch : { x : { $lt : 2 } } } } ).count() , prefix + " A3" );
+ assert.eq( 1 , t.find( { a : { $all : [ { $elemMatch : { x : { $lt : 4 } } } ,
+ { $elemMatch : { x : { $gt : 5 } } } ] } } ).count() , prefix + " A4" );
+
+ assert.throws( function() { return t.findOne( { a : { $all : [ 1, { $elemMatch : { x : 3 } } ] } } ) } );
+ assert.throws( function() { return t.findOne( { a : { $all : [ /a/, { $elemMatch : { x : 3 } } ] } } ) } );
+
+}
+
+t.save( { a : [ { x : 1 } , { x : 5 } ] } )
+t.save( { a : [ { x : 3 } , { x : 5 } ] } )
+t.save( { a : [ { x : 3 } , { x : 6 } ] } )
+
+go( "no index" );
+t.ensureIndex( { a : 1 } );
+go( "index(a)" );
+
+assert.eq( [], t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
+
+t.ensureIndex( { "a.x": 1 } );
+
+assert.eq( [ [ {"a.x":3},{"a.x":3} ] ], t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
+// only first $elemMatch used to find bounds
+assert.eq( [ [ {"a.x":3},{"a.x":3} ] ], t.find( { a : { $all : [ { $elemMatch : { x : 3 } }, { $elemMatch : { y : 5 } } ] } } ).explain().indexBounds );
+
+t.ensureIndex( { "a.x":1,"a.y":-1 } );
+
+assert.eq( [ [ {"a.x":3,"a.y":1.7976931348623157e+308},{"a.x":3,"a.y":4} ] ], t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
new file mode 100644
index 0000000..6fc6dc5
--- /dev/null
+++ b/jstests/auth/auth1.js
@@ -0,0 +1,73 @@
+// test read/write permissions
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "jstests_auth_auth1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "test" );
+
+t = db[ baseName ];
+t.drop();
+
+users = db.getCollection( "system.users" );
+users.remove( {} );
+
+db.addUser( "eliot" , "eliot" );
+db.addUser( "guest" , "guest", true );
+db.getSisterDB( "admin" ).addUser( "super", "super" );
+
+assert.throws( function() { t.findOne() }, [], "read without login" );
+
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+for( i = 0; i < 999; ++i ) {
+ t.save( {i:i} );
+}
+assert.eq( 999, t.count() , "A1" );
+assert.eq( 999, t.find().toArray().length , "A2" );
+
+assert.eq( 999, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A3" );
+db.eval( function() { db[ "jstests_auth_auth1" ].save( {i:999} ) } );
+assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A4" );
+
+var p = { key : { i : true } ,
+ reduce : function(obj,prev) { prev.count++; },
+initial: { count: 0 }
+};
+
+assert.eq( 1000, t.group( p ).length , "A5" );
+
+if ( db.runCommand( "features" ).readlock ){
+ print( "doing readonly test" );
+ assert( db.auth( "guest", "guest" ), "auth failed 2" );
+
+ assert.eq( 1000, t.count() , "B1" );
+ assert.eq( 1000, t.find().toArray().length , "B2" ); // make sure we have a getMore in play
+ assert.commandWorked( db.runCommand( {ismaster:1} ) , "B3" );
+
+ assert( !db.getLastError() , "B4" );
+ t.save( {} ); // fail
+ assert( db.getLastError() , "B5: " + tojson( db.getLastErrorObj() ) );
+ assert.eq( 1000, t.count() , "B6" );
+
+ assert.eq( 2, db.system.users.count() , "B7" );
+ assert( !db.getLastError() , "B8" );
+ db.addUser( "a", "b" );
+ assert( db.getLastError() , "B9" );
+ assert.eq( 2, db.system.users.count() , "B10");
+
+ assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "C1" );
+ assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].find().toArray().length; } ) , "C2" );
+ db.eval( function() { db[ "jstests_auth_auth1" ].save( {i:1} ) } , "C3" );
+ assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "C4" );
+
+ assert.eq( 1000, t.group( p ).length , "C5" );
+
+ var p = { key : { i : true } ,
+ reduce : function(obj,prev) { db.jstests_auth_auth1.save( {i:10000} ); prev.count++; },
+ initial: { count: 0 }
+ };
+
+ assert.throws( function() { return t.group( p ) }, "write reduce didn't fail" );
+}
+
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js
new file mode 100644
index 0000000..043b863
--- /dev/null
+++ b/jstests/auth/copyauth.js
@@ -0,0 +1,29 @@
+// test copyDatabase from an auth enabled source
+
+ports = allocatePorts( 2 );
+
+var baseName = "jstests_clone_copyauth";
+
+var source = startMongod( "--auth", "--port", ports[ 0 ], "--dbpath", "/data/db/" + baseName + "_source", "--nohttpinterface", "--bind_ip", "127.0.0.1", "--smallfiles" );
+var target = startMongod( "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "_target", "--nohttpinterface", "--bind_ip", "127.0.0.1", "--smallfiles" );
+
+source.getDB( baseName )[ baseName ].save( {i:1} );
+source.getDB( baseName ).addUser( "foo", "bar" );
+source.getDB( "admin" ).addUser( "super", "super" );
+assert.throws( function() { source.getDB( baseName )[ baseName ].findOne(); } );
+
+target.getDB( baseName ).copyDatabase( baseName, baseName, source.host, "foo", "bar" );
+assert.eq( 1, target.getDB( baseName )[ baseName ].count() );
+assert.eq( 1, target.getDB( baseName )[ baseName ].findOne().i );
+
+stopMongod( ports[ 1 ] );
+
+var target = startMongod( "--auth", "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "_target", "--nohttpinterface", "--bind_ip", "127.0.0.1", "--smallfiles" );
+
+target.getDB( "admin" ).addUser( "super1", "super1" );
+assert.throws( function() { source.getDB( baseName )[ baseName ].findOne(); } );
+target.getDB( "admin" ).auth( "super1", "super1" );
+
+target.getDB( baseName ).copyDatabase( baseName, baseName, source.host, "foo", "bar" );
+assert.eq( 1, target.getDB( baseName )[ baseName ].count() );
+assert.eq( 1, target.getDB( baseName )[ baseName ].findOne().i );
diff --git a/jstests/auth1.js b/jstests/auth1.js
index f6890cc..ce0159b 100644
--- a/jstests/auth1.js
+++ b/jstests/auth1.js
@@ -1,5 +1,3 @@
-
-
users = db.getCollection( "system.users" );
users.remove( {} );
diff --git a/jstests/auth2.js b/jstests/auth2.js
new file mode 100644
index 0000000..9b6dfad
--- /dev/null
+++ b/jstests/auth2.js
@@ -0,0 +1,5 @@
+// just make sure logout doesn't break anything
+
+// SERVER-724
+db.runCommand({logout : 1});
+db.runCommand({logout : 1});
diff --git a/jstests/capped3.js b/jstests/capped3.js
index f3b29b7..c4f1a3c 100644
--- a/jstests/capped3.js
+++ b/jstests/capped3.js
@@ -5,12 +5,12 @@ t2.drop();
for( i = 0; i < 1000; ++i ) {
t.save( {i:i} );
}
-assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:"jstests_capped3", toCollection:"jstests_capped3_clone", size:100000 } ) );
+assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:"jstests_capped3", toCollection:"jstests_capped3_clone", size:100000 } ), "A" );
c = t2.find();
for( i = 0; i < 1000; ++i ) {
- assert.eq( i, c.next().i );
+ assert.eq( i, c.next().i, "B" );
}
-assert( !c.hasNext() );
+assert( !c.hasNext(), "C" );
t.drop();
t2.drop();
@@ -18,13 +18,15 @@ t2.drop();
for( i = 0; i < 1000; ++i ) {
t.save( {i:i} );
}
-assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:"jstests_capped3", toCollection:"jstests_capped3_clone", size:1000 } ) );
+assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:"jstests_capped3", toCollection:"jstests_capped3_clone", size:1000 } ), "D" );
c = t2.find().sort( {$natural:-1} );
i = 999;
while( c.hasNext() ) {
- assert.eq( i--, c.next().i );
+ assert.eq( i--, c.next().i, "E" );
}
-assert( i < 990 );
+print( "i: " + i );
+print( "stats: " + tojson( t2.stats() ) );
+assert( i < 990, "F" );
t.drop();
t2.drop();
@@ -32,11 +34,11 @@ t2.drop();
for( i = 0; i < 1000; ++i ) {
t.save( {i:i} );
}
-assert.commandWorked( t.convertToCapped( 1000 ) );
+assert.commandWorked( t.convertToCapped( 1000 ), "G" );
c = t.find().sort( {$natural:-1} );
i = 999;
while( c.hasNext() ) {
- assert.eq( i--, c.next().i );
+ assert.eq( i--, c.next().i, "H" );
}
-assert( i < 990 );
-assert( i > 900 );
+assert( i < 990, "I" );
+assert( i > 900, "J" );
diff --git a/jstests/capped5.js b/jstests/capped5.js
index a5d04de..1c7ec3d 100644
--- a/jstests/capped5.js
+++ b/jstests/capped5.js
@@ -16,3 +16,35 @@ t.ensureIndex( { x : 1 } )
assert.eq( 52 , t.findOne( { x : 11 } ).z , "B1" );
assert.eq( 52 , t.findOne( { _id : 5 } ).z , "B2" );
+
+t.drop();
+db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
+t.insert( { _id : 5 , x : 11 } );
+t.insert( { _id : 6 , x : 11 } );
+t.ensureIndex( { x:1 }, {unique:true, dropDups:true } );
+assert.eq( 0, db.system.indexes.count( {ns:"test."+tn} ) );
+assert.eq( 2, t.find().toArray().length );
+
+t.drop();
+db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
+t.insert( { _id : 5 , x : 11 } );
+t.insert( { _id : 5 , x : 12 } );
+t.ensureIndex( { _id:1 } );
+assert.eq( 0, db.system.indexes.count( {ns:"test."+tn} ) );
+assert.eq( 2, t.find().toArray().length );
+
+t.drop();
+db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
+t.insert( { _id : 5 , x : 11 } );
+t.insert( { _id : 6 , x : 12 } );
+t.ensureIndex( { x:1 }, {unique:true, dropDups:true } );
+assert.eq( 1, db.system.indexes.count( {ns:"test."+tn} ) );
+assert.eq( 2, t.find().hint( {x:1} ).toArray().length );
+
+// SERVER-525
+t.drop();
+db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
+t.ensureIndex( { _id:1 } );
+t.insert( { _id : 5 , x : 11 } );
+t.insert( { _id : 5 , x : 12 } );
+assert.eq( 1, t.find().toArray().length );
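
The new cases pin down index handling on capped collections: since capped collections do not support removes, dropDups cannot repair existing duplicates, so building an index after conflicting documents exist is refused (system.indexes stays empty); an index created up front rejects the conflicting insert instead (SERVER-525). Condensed sketch, collection name illustrative:

    // With the index in place first, the duplicate insert is dropped.
    db.createCollection( "cap5", { capped: true, size: 1024 * 1024 } );
    db.cap5.ensureIndex( { _id: 1 } );
    db.cap5.insert( { _id: 1, x: 11 } );
    db.cap5.insert( { _id: 1, x: 12 } );   // duplicate _id, rejected
    assert.eq( 1, db.cap5.find().toArray().length );
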
diff --git a/jstests/clone/clonecollection.js b/jstests/clone/clonecollection.js
index 64d4ff0..123369f 100644
--- a/jstests/clone/clonecollection.js
+++ b/jstests/clone/clonecollection.js
@@ -23,6 +23,45 @@ waitParallel = function() {
assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
}
+cloneNo = -1;
+startstartclone = function( spec ) {
+ spec = spec || "";
+ cloneNo++;
+ doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\"" + spec + " } ); print( \"clone_clone_clone_commandResult::" + cloneNo + "::\" + tojson( z , '' , true ) + \":::::\" );" );
+}
+
+finishstartclone = function() {
+ waitParallel();
+ // even after parallel shell finished, must wait for finishToken line to appear in log
+ assert.soon( function() {
+ raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
+ ret = raw.match( new RegExp( "clone_clone_clone_commandResult::" + cloneNo + "::(.*):::::" ) );
+ if ( ret == null ) {
+ return false;
+ }
+ ret = ret[ 1 ];
+ return true;
+ } );
+
+ eval( "ret = " + ret );
+
+ assert.commandWorked( ret );
+ return ret;
+}
+
+dofinishclonecmd = function( ret ) {
+ finishToken = ret.finishToken;
+ // Round-tripping through JS can corrupt the cursor ids we store as BSON
+ // Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
+ // makes the db start scanning from the beginning of the collection.
+ finishToken.cursorId = new Date( 0 );
+ return t.runCommand( {finishCloneCollection:finishToken} );
+}
+
+finishclone = function( ret ) {
+ assert.commandWorked( dofinishclonecmd( ret ) );
+}
+
ports = allocatePorts( 2 );
f = startMongod( "--port", ports[ 0 ], "--dbpath", "/data/db/" + baseName + "_from", "--nohttpinterface", "--bind_ip", "127.0.0.1" ).getDB( baseName );
@@ -52,7 +91,7 @@ if ( t.system.indexes.find().count() != 2 ) {
}
assert.eq( 2, t.system.indexes.find().count(), "expected index missing" );
// Verify index works
-assert.eq( 50, t.a.find( { i: 50 } ).hint( { i: 1 } ).explain().startKey.i );
+assert.eq( 50, t.a.find( { i: 50 } ).hint( { i: 1 } ).explain().indexBounds[0][0].i );
assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" );
// Check that capped-ness is preserved on clone
@@ -71,16 +110,17 @@ t.a.drop();
for( i = 0; i < 100000; ++i ) {
f.a.save( { i: i } );
}
+assert.eq( 100000, f.a.count() );
-doParallel( "assert.commandWorked( db.cloneCollection( \"localhost:" + ports[ 0 ] + "\", \"a\", {i:{$gte:0}} ) );" );
+startstartclone( ", query:{i:{$gte:0}}" );
sleep( 200 );
f.a.save( { i: 200000 } );
f.a.save( { i: -1 } );
f.a.remove( { i: 0 } );
f.a.update( { i: 99998 }, { i: 99998, x: "y" } );
-assert( !doneParallel(), "test run invalid" );
-waitParallel();
+ret = finishstartclone();
+finishclone( ret );
assert.eq( 100000, t.a.find().count() );
assert.eq( 1, t.a.find( { i: 200000 } ).count() );
@@ -96,15 +136,16 @@ t.a.drop();
for( i = 0; i < 200000; ++i ) {
f.a.save( { i: i } );
}
+assert.eq( 200000, f.a.count() );
-doParallel( "assert.commandFailed( db.runCommand( { cloneCollection: \"jstests_clonecollection.a\", from: \"localhost:" + ports[ 0 ] + "\", logSizeMb:1 } ) );" );
+startstartclone( ", logSizeMb:1" );
+ret = finishstartclone();
-sleep( 200 );
for( i = 200000; i < 250000; ++i ) {
f.a.save( { i: i } );
}
-waitParallel();
+assert.commandFailed( dofinishclonecmd( ret ) );
// Make sure the same works with standard size op log.
f.a.drop();
@@ -113,15 +154,17 @@ t.a.drop();
for( i = 0; i < 200000; ++i ) {
f.a.save( { i: i } );
}
+assert.eq( 200000, f.a.count() );
-doParallel( "assert.commandWorked( db.cloneCollection( \"localhost:" + ports[ 0 ] + "\", \"a\" ) );" );
+startstartclone();
+ret = finishstartclone();
-sleep( 200 );
for( i = 200000; i < 250000; ++i ) {
f.a.save( { i: i } );
}
+assert.eq( 250000, f.a.count() );
-waitParallel();
+finishclone( ret );
assert.eq( 250000, t.a.find().count() );
// Test startCloneCollection and finishCloneCollection commands.
@@ -131,35 +174,17 @@ t.a.drop();
for( i = 0; i < 100000; ++i ) {
f.a.save( { i: i } );
}
+assert.eq( 100000, f.a.count() );
-doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\" } ); print( \"clone_clone_clone_commandResult:::::\" + tojson( z , '' , true ) + \":::::\" );" );
+startstartclone();
sleep( 200 );
f.a.save( { i: -1 } );
-waitParallel();
-// even after parallel shell finished, must wait for finishToken line to appear in log
-assert.soon( function() {
- raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
- ret = raw.match( /clone_clone_clone_commandResult:::::(.*):::::/ );
- if ( ret == null ) {
- return false;
- }
- ret = ret[ 1 ];
- return true;
- } );
-
-eval( "ret = " + ret );
-
-assert.commandWorked( ret );
+ret = finishstartclone();
assert.eq( 100001, t.a.find().count() );
f.a.save( { i: -2 } );
assert.eq( 100002, f.a.find().count() );
-finishToken = ret.finishToken;
-// Round-tripping through JS can corrupt the cursor ids we store as BSON
-// Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
-// makes the db start scanning from the beginning of the collection.
-finishToken.cursorId = new Date( 0 );
-assert.commandWorked( t.runCommand( {finishCloneCollection:finishToken} ) );
+finishclone( ret );
assert.eq( 100002, t.a.find().count() );
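
The refactored helpers wrap the two-phase clone protocol: startCloneCollection snapshots the collection and returns a finishToken (carrying a cursor id into the source's operation log), and finishCloneCollection replays whatever the source accepted in between. In outline, host illustrative:

    // Phase 1: begin the clone; the source keeps taking writes.
    z = db.runCommand( { startCloneCollection: "jstests_clonecollection.a",
                         from: "localhost:27017" } );
    // ... inserts/updates/removes happen on the source meanwhile ...
    // Phase 2: hand the token back to apply the logged operations.
    assert.commandWorked( db.runCommand( { finishCloneCollection: z.finishToken } ) );
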
diff --git a/jstests/copydb2.js b/jstests/copydb2.js
new file mode 100644
index 0000000..90ef943
--- /dev/null
+++ b/jstests/copydb2.js
@@ -0,0 +1,17 @@
+a = db.getSisterDB( "copydb2-test-a" );
+b = db.getSisterDB( "copydb2-test-b" );
+
+a.dropDatabase();
+b.dropDatabase();
+
+a.foo.save( { a : 1 } );
+
+a.addUser( "chevy" , "chase" );
+
+assert.eq( 1 , a.foo.count() , "A" );
+assert.eq( 0 , b.foo.count() , "B" );
+
+// SERVER-727
+a.copyDatabase( a._name , b._name, "" , "chevy" , "chase" );
+assert.eq( 1 , a.foo.count() , "C" );
+assert.eq( 1 , b.foo.count() , "D" );
diff --git a/jstests/cursor8.js b/jstests/cursor8.js
index 169bb5d..5ebd4f5 100644
--- a/jstests/cursor8.js
+++ b/jstests/cursor8.js
@@ -1,10 +1,19 @@
-db.f.drop();
-db.f.save( {} );
-db.f.save( {} );
-db.f.save( {} );
+t = db.f
+t.drop();
+t.save( {} );
+t.save( {} );
+t.save( {} );
db.getMongo().getDB( "admin" ).runCommand( {closeAllDatabases:1} );
-assert.eq( 0, db.runCommand( {cursorInfo:1} ).clientCursors_size );
-assert.eq( 2, db.f.find( {} ).limit( 2 ).toArray().length );
-assert.eq( 1, db.runCommand( {cursorInfo:1} ).clientCursors_size );
+function test( want , msg ){
+ var res = db.runCommand( { cursorInfo:1 } );
+ assert.eq( want , res.clientCursors_size , msg + " " + tojson( res ) );
+}
+
+test( 0 , "A1" );
+assert.eq( 3 , t.find().count() , "A1" );
+assert.eq( 3 , t.find( {} ).count() , "A2" );
+assert.eq( 2, t.find( {} ).limit( 2 ).itcount() , "A3" );
+test( 1 , "B1" );
+
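
The rewrite reads the open-cursor count directly: a find().limit(2) over three documents leaves one client cursor registered on the server, which the test now observes through the cursorInfo command instead of inferring it from toArray() lengths. The probe, isolated:

    // How many server-side client cursors are currently open?
    var info = db.runCommand( { cursorInfo: 1 } );
    print( "open cursors: " + info.clientCursors_size );
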
diff --git a/jstests/dbadmin.js b/jstests/dbadmin.js
index c7b7bc8..8d0e7d1 100644
--- a/jstests/dbadmin.js
+++ b/jstests/dbadmin.js
@@ -4,9 +4,10 @@ t.save( { x : 1 } );
before = db._adminCommand( "serverStatus" )
if ( before.mem.supported ){
- db._adminCommand( "closeAllDatabases" );
+ cmdres = db._adminCommand( "closeAllDatabases" );
after = db._adminCommand( "serverStatus" );
- assert( before.mem.mapped > after.mem.mapped , "closeAllDatabases does something before:" + tojson( before ) + " after:" + tojson( after ) );
+ assert( before.mem.mapped > after.mem.mapped , "closeAllDatabases does something before:" + tojson( before.mem ) + " after:" + tojson( after.mem ) + " cmd res:" + tojson( cmdres ) );
+ print( before.mem.mapped + " -->> " + after.mem.mapped );
}
else {
print( "can't test serverStatus on this machine" );
@@ -17,6 +18,4 @@ t.save( { x : 1 } );
res = db._adminCommand( "listDatabases" );
assert( res.databases.length > 0 , "listDatabases 1" );
-print( "BEFORE: " + tojson( before ) );
-print( "AFTER : " + tojson( after ) );
// TODO: add more tests here
diff --git a/jstests/dbhash.js b/jstests/dbhash.js
new file mode 100644
index 0000000..101be18
--- /dev/null
+++ b/jstests/dbhash.js
@@ -0,0 +1,43 @@
+
+a = db.dbhasha;
+b = db.dbhashb;
+
+a.drop();
+b.drop();
+
+function gh( coll , mydb ){
+ if ( ! mydb ) mydb = db;
+ var x = mydb.runCommand( "dbhash" ).collections[coll.getName()];
+ if ( ! x )
+ return "";
+ return x;
+}
+
+function dbh( mydb ){
+ return mydb.runCommand( "dbhash" ).md5;
+}
+
+assert.eq( gh( a ) , gh( b ) , "A1" );
+
+a.insert( { _id : 5 } );
+assert.neq( gh( a ) , gh( b ) , "A2" );
+
+b.insert( { _id : 5 } );
+assert.eq( gh( a ) , gh( b ) , "A3" );
+
+dba = db.getSisterDB( "dbhasha" );
+dbb = db.getSisterDB( "dbhashb" );
+
+dba.dropDatabase();
+dbb.dropDatabase();
+
+assert.eq( gh( dba.foo , dba ) , gh( dbb.foo , dbb ) , "B1" );
+assert.eq( dbh( dba ) , dbh( dbb ) , "C1" );
+
+dba.foo.insert( { _id : 5 } );
+assert.neq( gh( dba.foo , dba ) , gh( dbb.foo , dbb ) , "B2" );
+assert.neq( dbh( dba ) , dbh( dbb ) , "C2" );
+
+dbb.foo.insert( { _id : 5 } );
+assert.eq( gh( dba.foo , dba ) , gh( dbb.foo , dbb ) , "B3" );
+assert.eq( dbh( dba ) , dbh( dbb ) , "C3" );
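
dbhash gives a cheap way to compare data sets without transferring them: one hash per collection plus an overall md5 (parallel/repl.js below uses the md5 to confirm master and slave converge). Sketch of the comparison, database names illustrative:

    var h1 = db.getSisterDB( "dbhasha" ).runCommand( "dbhash" );
    var h2 = db.getSisterDB( "dbhashb" ).runCommand( "dbhash" );
    print( tojson( h1.collections ) );   // { <collName> : <hash>, ... }
    assert.eq( h1.md5, h2.md5, "databases differ" );
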
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
new file mode 100644
index 0000000..a5fd18e
--- /dev/null
+++ b/jstests/disk/directoryperdb.js
@@ -0,0 +1,62 @@
+var baseDir = "jstests_disk_directoryper";
+var baseName = "directoryperdb"
+port = allocatePorts( 1 )[ 0 ];
+dbpath = "/data/db/" + baseDir + "/";
+
+var m = startMongod( "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+db[ baseName ].save( {} );
+assert.eq( 1, db[ baseName ].count() );
+
+checkDir = function( dir ) {
+ db.runCommand( {fsync:1} );
+ files = listFiles( dir );
+ found = false;
+ for( f in files ) {
+ if ( new RegExp( baseName ).test( files[ f ].name ) ) {
+ found = true;
+ assert( files[ f ].isDirectory, "file not directory" );
+ }
+ }
+ assert( found, "no directory" );
+
+ files = listFiles( dir + baseName );
+ for( f in files ) {
+ assert( new RegExp( baseName + "/" + baseName + "." ).test( files[ f ].name ) );
+ }
+}
+checkDir( dbpath );
+
+// file iterator
+assert( m.getDBs().totalSize > 0, "bad size calc" );
+
+// repair
+db.runCommand( {repairDatabase:1, backupOriginalFiles:true} );
+checkDir( dbpath );
+files = listFiles( dbpath );
+for( f in files ) {
+ if ( new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ) ) {
+ backupDir = files[ f ].name + "/";
+ }
+}
+checkDir( backupDir );
+assert.eq( 1, db[ baseName ].count() );
+
+// tool test
+stopMongod( port );
+
+externalPath = "/data/db/" + baseDir + "_external/";
+
+runMongoProgram( "mongodump", "--dbpath", dbpath, "--directoryperdb", "--out", externalPath );
+resetDbpath( dbpath );
+runMongoProgram( "mongorestore", "--dbpath", dbpath, "--directoryperdb", "--dir", externalPath );
+m = startMongoProgram( "mongod", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+checkDir( dbpath );
+assert.eq( 1, db[ baseName ].count() );
+assert( m.getDBs().totalSize > 0, "bad size calc" );
+
+// drop db test
+db.dropDatabase();
+files = listFiles( dbpath );
+files.forEach( function( f ) { assert( !new RegExp( baseName ).test( f.name ), "drop database - dir not cleared" ); } );
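
With --directoryperdb every database's files live in their own subdirectory of the dbpath, which is what checkDir verifies; the offline tools must be passed the matching flag, as the tool test above does. Layout and invocations, paths illustrative:

    /data/db/<dbpath>/<dbname>/<dbname>.ns
    /data/db/<dbpath>/<dbname>/<dbname>.0

    mongodump    --dbpath <dbpath> --directoryperdb --out <backupdir>
    mongorestore --dbpath <dbpath> --directoryperdb --dir <backupdir>
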
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 7f75266..8057174 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -15,6 +15,9 @@ if ( doIt ) {
port = allocatePorts( 1 )[ 0 ];
m = startMongoProgram( "mongod", "--port", port, "--dbpath", "/data/db/diskfulltest", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
m.getDB( "diskfulltest" ).getCollection( "diskfulltest" ).save( { a: 6 } );
- assert.soon( function() { return rawMongoProgramOutput().match( /dbexit: really exiting now/ ); }, "didn't see 'really exiting now'" );
- assert( !rawMongoProgramOutput().match( /Got signal/ ), "saw 'Got signal', not expected. Output: " + rawMongoProgramOutput() );
+ assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
+ assert.soon( function() { return rawMongoProgramOutput().match( /Caught Assertion in insert , continuing/ ); }, "didn't see 'Caught Assertion...'" );
+ sleep( 3000 );
+ m2 = new Mongo( m.host );
+ printjson( m2.getDBs() );
}
diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js
new file mode 100644
index 0000000..944ad1c
--- /dev/null
+++ b/jstests/disk/newcollection.js
@@ -0,0 +1,13 @@
+// SERVER-594 test
+
+port = allocatePorts( 1 )[ 0 ]
+var baseName = "jstests_disk_newcollection";
+var m = startMongod( "--noprealloc", "--smallfiles", "--port", port, "--dbpath", "/data/db/" + baseName );
+db = m.getDB( "test" );
+
+db.createCollection( baseName, {size:15.9*1024*1024} );
+db.baseName.drop();
+
+size = m.getDBs().totalSize;
+db.baseName.save( {} );
+assert.eq( size, m.getDBs().totalSize );
diff --git a/jstests/disk/preallocate.js b/jstests/disk/preallocate.js
index 69f9a47..c3c9bd0 100644
--- a/jstests/disk/preallocate.js
+++ b/jstests/disk/preallocate.js
@@ -1,4 +1,4 @@
-port = allocatePorts( 1 )[ 0 ]
+port = allocatePorts( 1 )[ 0 ];
var baseName = "jstests_preallocate";
@@ -10,12 +10,14 @@ var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName );
m.getDB( baseName ).createCollection( baseName + "1" );
-vs = vsize();
-
stopMongod( port );
var m = startMongoProgram( "mongod", "--port", port, "--dbpath", "/data/db/" + baseName );
+size = m.getDBs().totalSize;
+
m.getDB( baseName ).createCollection( baseName + "2" );
-assert.eq( vs, vsize() );
+sleep( 2000 ); // give prealloc a chance
+
+assert.eq( size, m.getDBs().totalSize );
diff --git a/jstests/disk/repair.js b/jstests/disk/repair.js
new file mode 100644
index 0000000..6c8d81b
--- /dev/null
+++ b/jstests/disk/repair.js
@@ -0,0 +1,18 @@
+var baseName = "jstests_disk_repair";
+
+port = allocatePorts( 1 )[ 0 ];
+dbpath = "/data/db/" + baseName + "/";
+repairpath = dbpath + "repairDir/"
+
+resetDbpath( dbpath );
+resetDbpath( repairpath );
+
+m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+db[ baseName ].save( {} );
+db.runCommand( {repairDatabase:1, backupOriginalFiles:true} );
+
+files = listFiles( dbpath );
+for( f in files ) {
+ assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+}
diff --git a/jstests/drop.js b/jstests/drop.js
index b233409..1bd539e 100644
--- a/jstests/drop.js
+++ b/jstests/drop.js
@@ -18,4 +18,4 @@ assert.eq( 1, db.system.indexes.find( {ns:"test.jstests_drop"} ).count() , "G" )
// make sure we can still use it
f.save( {} );
-assert.eq( 1, f.find().hint( {_id:new ObjectId( "000000000000000000000000" )} ).toArray().length , "H" );
+assert.eq( 1, f.find().hint( "_id_" ).toArray().length , "H" );
diff --git a/jstests/dropIndex.js b/jstests/dropIndex.js
new file mode 100644
index 0000000..a6e5f46
--- /dev/null
+++ b/jstests/dropIndex.js
@@ -0,0 +1,16 @@
+
+t = db.dropIndex;
+t.drop();
+
+t.insert( { _id : 1 , a : 2 , b : 3 } );
+assert.eq( 1 , t.getIndexes().length , "A1" );
+
+t.ensureIndex( { a : 1 } );
+t.ensureIndex( { b : 1 } );
+assert.eq( 3 , t.getIndexes().length , "A2" );
+
+x = db._dbCommand( { dropIndexes: t.getName() , index : t._genIndexName( { a : 1 } ) } );
+assert.eq( 2 , t.getIndexes().length , "B1" );
+
+x = db._dbCommand( { dropIndexes: t.getName() , index : { b : 1 } } )
+assert.eq( 1 , t.getIndexes().length , "B2" );
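
The test exercises both spellings the dropIndexes command accepts: the generated index name (via the shell's _genIndexName helper) and the original key pattern. Equivalent direct forms, assuming a collection with indexes on a and b:

    // Drop by generated name ("a_1") or by key pattern.
    db.runCommand( { dropIndexes: "dropIndex", index: "a_1" } );
    db.runCommand( { dropIndexes: "dropIndex", index: { b: 1 } } );
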
diff --git a/jstests/exists2.js b/jstests/exists2.js
new file mode 100644
index 0000000..a9b4d1e
--- /dev/null
+++ b/jstests/exists2.js
@@ -0,0 +1,14 @@
+
+t = db.exists2;
+t.drop();
+
+t.save( { a : 1 , b : 1 } )
+t.save( { a : 1 , b : 1 , c : 1 } )
+
+assert.eq( 2 , t.find().itcount() , "A1" );
+assert.eq( 2 , t.find( { a : 1 , b : 1 } ).itcount() , "A2" );
+assert.eq( 1 , t.find( { a : 1 , b : 1 , c : { "$exists" : true } } ).itcount() , "A3" );
+
+t.ensureIndex( { a : 1 , b : 1 , c : 1 } )
+assert.eq( 1 , t.find( { a : 1 , b : 1 , c : { "$exists" : true } } ).itcount() , "B1" );
+
diff --git a/jstests/explain2.js b/jstests/explain2.js
new file mode 100644
index 0000000..5a36552
--- /dev/null
+++ b/jstests/explain2.js
@@ -0,0 +1,27 @@
+
+t = db.explain2
+t.drop();
+
+t.ensureIndex( { a : 1 , b : 1 } );
+
+for ( i=1; i<10; i++ ){
+ t.insert( { _id : i , a : i , b : i , c : i } );
+}
+
+function go( q , c , b , o ){
+ var e = t.find( q ).explain();
+ assert.eq( c , e.n , "count " + tojson( q ) )
+ assert.eq( b , e.nscanned , "nscanned " + tojson( q ) )
+ assert.eq( o , e.nscannedObjects , "nscannedObjects " + tojson( q ) )
+}
+
+q = { a : { $gt : 3 } }
+go( q , 6 , 7 , 6 );
+
+q.b = 5
+go( q , 1 , 6 , 1 );
+
+delete q.b
+q.c = 5
+go( q , 1 , 7 , 6 );
+
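
The go() helper separates three explain counters: n (documents matching the full predicate), nscanned (index keys examined), and nscannedObjects (documents fetched). The last case shows why they diverge when a queried field is absent from the { a:1, b:1 } index:

    var e = t.find( { a: { $gt: 3 }, c: 5 } ).explain();
    // e.n == 1                only one document satisfies both predicates
    // e.nscanned == 7         index keys touched for the a > 3 range
    // e.nscannedObjects == 6  documents loaded to evaluate the unindexed c
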
diff --git a/jstests/find6.js b/jstests/find6.js
index baa5969..c4efd3b 100644
--- a/jstests/find6.js
+++ b/jstests/find6.js
@@ -9,3 +9,33 @@ assert.eq( 2 , t.find().count() , "A" );
assert.eq( 1 , t.find( { b : null } ).count() , "B" );
assert.eq( 1 , t.find( "function() { return this.b == null; }" ).itcount() , "C" );
assert.eq( 1 , t.find( "function() { return this.b == null; }" ).count() , "D" );
+
+/* test some stuff with dot array notation */
+q = db.find6a;
+q.drop();
+q.insert( { "a" : [ { "0" : 1 } ] } );
+q.insert( { "a" : [ { "0" : 2 } ] } );
+q.insert( { "a" : [ 1 ] } );
+q.insert( { "a" : [ 9, 1 ] } );
+
+function f() {
+
+ assert.eq( 2, q.find( { 'a.0' : 1 } ).count(), "da1");
+ assert.eq( 2, q.find( { 'a.0' : 1 } ).count(), "da2");
+
+ assert.eq( 1, q.find( { 'a.0' : { $gt : 8 } } ).count(), "da3");
+ assert.eq( 0, q.find( { 'a.0' : { $lt : 0 } } ).count(), "da4");
+
+}
+
+for( var pass = 0; pass <= 1 ; pass++ ) {
+ f();
+ q.ensureIndex({a:1});
+}
+
+t = db.multidim;
+t.drop();
+t.insert({"a" : [ [ ], 1, [ 3, 4 ] ] });
+assert.eq(1, t.find({"a.2":[3,4]}).count(), "md1");
+assert.eq(1, t.find({"a.2.1":4}).count(), "md2");
+assert.eq(0, t.find({"a.2.1":3}).count(), "md3");
diff --git a/jstests/find7.js b/jstests/find7.js
new file mode 100644
index 0000000..ca4c7d4
--- /dev/null
+++ b/jstests/find7.js
@@ -0,0 +1,8 @@
+t = db.find7;
+t.drop();
+
+x = { "_id" : { "d" : 3649, "w" : "signed" }, "u" : { "3649" : 5 } };
+t.insert(x );
+assert.eq( x , t.findOne() , "A1" );
+assert.eq( x , t.findOne( { _id : x._id } ) , "A2" );
+
diff --git a/jstests/geo1.js b/jstests/geo1.js
new file mode 100644
index 0000000..8f31e8e
--- /dev/null
+++ b/jstests/geo1.js
@@ -0,0 +1,41 @@
+
+t = db.geo1
+t.drop();
+
+idx = { loc : "2d" , zip : 1 }
+
+t.insert( { zip : "06525" , loc : [ 41.352964 , 73.01212 ] } )
+t.insert( { zip : "10024" , loc : [ 40.786387 , 73.97709 ] } )
+t.insert( { zip : "94061" , loc : [ 37.463911 , 122.23396 ] } )
+assert.isnull( db.getLastError() )
+
+// test "2d" has to be first
+assert.eq( 1 , t.getIndexKeys().length , "S1" );
+t.ensureIndex( { zip : 1 , loc : "2d" } );
+assert.eq( 1 , t.getIndexKeys().length , "S2" );
+
+t.ensureIndex( idx );
+assert.eq( 2 , t.getIndexKeys().length , "S3" );
+
+assert.eq( 3 , t.count() , "B1" );
+t.insert( { loc : [ 200 , 200 ] } )
+assert( db.getLastError() , "B2" )
+assert.eq( 3 , t.count() , "B3" );
+
+// test normal access
+
+wb = t.findOne( { zip : "06525" } )
+assert( wb , "C1" );
+
+assert.eq( "06525" , t.find( { loc : wb.loc } ).hint( { "$natural" : 1 } )[0].zip , "C2" )
+assert.eq( "06525" , t.find( { loc : wb.loc } )[0].zip , "C3" )
+assert.eq( 1 , t.find( { loc : wb.loc } ).explain().nscanned , "C4" )
+
+// test config options
+
+t.drop();
+
+t.ensureIndex( { loc : "2d" } , { min : -500 , max : 500 , bits : 4 } );
+t.insert( { loc : [ 200 , 200 ] } )
+assert.isnull( db.getLastError() , "D1" )
+
diff --git a/jstests/geo2.js b/jstests/geo2.js
new file mode 100644
index 0000000..b9452c8
--- /dev/null
+++ b/jstests/geo2.js
@@ -0,0 +1,43 @@
+
+t = db.geo2
+t.drop();
+
+n = 1
+for ( var x=-100; x<100; x+=2 ){
+ for ( var y=-100; y<100; y+=2 ){
+ t.insert( { _id : n++ , loc : [ x , y ] } )
+ }
+}
+
+t.ensureIndex( { loc : "2d" } )
+
+fast = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 } );
+slow = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , start : "11" } );
+
+printjson(fast.stats);
+printjson(slow.stats);
+
+v = "\n" + tojson( fast ) + "\n" + tojson( slow );
+
+assert.lt( fast.stats.nscanned * 10 , slow.stats.nscanned , "A1" + v );
+assert.lt( fast.stats.objectsLoaded , slow.stats.objectsLoaded , "A2" + v );
+assert.eq( fast.stats.avgDistance , slow.stats.avgDistance , "A3" + v );
+
+function a( cur ){
+ var total = 0;
+ var outof = 0;
+ while ( cur.hasNext() ){
+ var o = cur.next();
+ total += Geo.distance( [ 50 , 50 ] , o.loc );
+ outof++;
+ }
+ return total/outof;
+}
+
+assert.close( fast.stats.avgDistance , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(10) ) , "B1" )
+assert.close( 1.33333 , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(3) ) , "B2" );
+assert.close( fast.stats.avgDistance , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(10) ) , "B3" );
+
+printjson( t.find( { loc : { $near : [ 50 , 50 ] } } ).explain() )
+
+
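
Both runs read the same geoNear reply shape; the start parameter ("11") appears to pin the initial search to a coarse geohash box, which is why the "slow" run must scan and load strictly more. Fields used by these tests:

    var res = db.runCommand( { geoNear: "geo2", near: [ 50, 50 ], num: 10 } );
    // res.results[ i ].dis   distance from the query point
    // res.results[ i ].obj   the matching document
    // res.stats              includes nscanned, objectsLoaded, avgDistance
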
diff --git a/jstests/geo3.js b/jstests/geo3.js
new file mode 100644
index 0000000..6bf27f9
--- /dev/null
+++ b/jstests/geo3.js
@@ -0,0 +1,87 @@
+
+t = db.geo3
+t.drop();
+
+n = 1
+for ( var x=-100; x<100; x+=2 ){
+ for ( var y=-100; y<100; y+=2 ){
+ t.insert( { _id : n++ , loc : [ x , y ] , a : Math.abs( x ) % 5 , b : Math.abs( y ) % 5 } )
+ }
+}
+
+
+t.ensureIndex( { loc : "2d" } )
+
+fast = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 } );
+
+//printjson( fast.stats );
+
+slow = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , start : "11" } );
+
+//printjson( slow.stats );
+
+assert.lt( fast.stats.nscanned * 10 , slow.stats.nscanned , "A1" );
+assert.lt( fast.stats.objectsLoaded , slow.stats.objectsLoaded , "A2" );
+assert.eq( fast.stats.avgDistance , slow.stats.avgDistance , "A3" );
+
+// test filter
+
+filtered1 = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , query : { a : 2 } } );
+assert.eq( 10 , filtered1.results.length , "B1" );
+filtered1.results.forEach( function(z){ assert.eq( 2 , z.obj.a , "B2: " + tojson( z ) ); } )
+//printjson( filtered1.stats );
+
+function avgA( q , len ){
+ if ( ! len )
+ len = 10;
+ var realq = { loc : { $near : [ 50 , 50 ] } };
+ if ( q )
+ Object.extend( realq , q );
+ var as =
+ t.find( realq ).limit(len).map(
+ function(z){
+ return z.a;
+ }
+ );
+ assert.eq( len , as.length , "length in avgA" );
+ return Array.avg( as );
+}
+
+function testFiltering( msg ){
+ assert.gt( 2 , avgA( {} ) , msg + " testFiltering 1 " );
+ assert.eq( 2 , avgA( { a : 2 } ) , msg + " testFiltering 2 " );
+ assert.eq( 4 , avgA( { a : 4 } ) , msg + " testFiltering 3 " );
+}
+
+testFiltering( "just loc" );
+
+t.dropIndex( { loc : "2d" } )
+assert.eq( 1 , t.getIndexKeys().length , "setup 3a" )
+t.ensureIndex( { loc : "2d" , a : 1 } )
+assert.eq( 2 , t.getIndexKeys().length , "setup 3b" )
+
+filtered2 = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , query : { a : 2 } } );
+assert.eq( 10 , filtered2.results.length , "B3" );
+filtered2.results.forEach( function(z){ assert.eq( 2 , z.obj.a , "B4: " + tojson( z ) ); } )
+
+assert.eq( filtered1.stats.avgDistance , filtered2.stats.avgDistance , "C1" )
+assert.eq( filtered1.stats.nscanned , filtered2.stats.nscanned , "C2" )
+assert.gt( filtered1.stats.objectsLoaded , filtered2.stats.objectsLoaded , "C3" )
+
+testFiltering( "loc and a" );
+
+t.dropIndex( { loc : "2d" , a : 1 } )
+assert.eq( 1 , t.getIndexKeys().length , "setup 4a" )
+t.ensureIndex( { loc : "2d" , b : 1 } )
+assert.eq( 2 , t.getIndexKeys().length , "setup 4b" )
+
+testFiltering( "loc and b" );
+
+
+q = { loc : { $near : [ 50 , 50 ] } }
+assert.eq( 100 , t.find( q ).limit(100).itcount() , "D1" )
+assert.eq( 100 , t.find( q ).limit(100).count() , "D2" )
+
+assert.eq( 20 , t.find( q ).limit(20).itcount() , "D3" )
+assert.eq( 20 , t.find( q ).limit(20).size() , "D4" )
+
diff --git a/jstests/geo4.js b/jstests/geo4.js
new file mode 100644
index 0000000..73b4020
--- /dev/null
+++ b/jstests/geo4.js
@@ -0,0 +1,10 @@
+var t = db.geo4;
+t.drop();
+
+t.insert( { zip : "06525" , loc : [ 41.352964 , 73.01212 ] } );
+
+t.ensureIndex( { loc : "2d" }, { bits : 33 } );
+assert.eq( db.getLastError() , "can't have more than 32 bits in geo index" , "a" );
+
+t.ensureIndex( { loc : "2d" }, { bits : 32 } );
+assert( !db.getLastError(), "b" );
diff --git a/jstests/geo5.js b/jstests/geo5.js
new file mode 100644
index 0000000..67b00f8
--- /dev/null
+++ b/jstests/geo5.js
@@ -0,0 +1,18 @@
+t = db.geo5;
+t.drop();
+
+t.insert( { p : [ 0,0 ] } )
+t.ensureIndex( { p : "2d" } )
+
+res = t.runCommand( "geoNear" , { near : [1,1] } );
+assert.eq( 1 , res.results.length , "A1" );
+
+t.insert( { p : [ 1,1 ] } )
+t.insert( { p : [ -1,-1 ] } )
+res = t.runCommand( "geoNear" , { near : [50,50] } );
+assert.eq( 3 , res.results.length , "A2" );
+
+t.insert( { p : [ -1,-1 ] } )
+res = t.runCommand( "geoNear" , { near : [50,50] } );
+assert.eq( 4 , res.results.length , "A3" );
+
diff --git a/jstests/geo6.js b/jstests/geo6.js
new file mode 100644
index 0000000..47e3bf8
--- /dev/null
+++ b/jstests/geo6.js
@@ -0,0 +1,23 @@
+
+t = db.geo6;
+t.drop();
+
+t.ensureIndex( { loc : "2d" } );
+
+assert.eq( 0 , t.find().itcount() , "pre0" );
+assert.eq( 0 , t.find( { loc : { $near : [50,50] } } ).itcount() , "pre1" )
+
+t.insert( { _id : 1 , loc : [ 1 , 1 ] } )
+t.insert( { _id : 2 , loc : [ 1 , 2 ] } )
+t.insert( { _id : 3 } )
+
+assert.eq( 3 , t.find().itcount() , "A1" )
+assert.eq( 2 , t.find().hint( { loc : "2d" } ).itcount() , "A2" )
+assert.eq( 2 , t.find( { loc : { $near : [50,50] } } ).itcount() , "A3" )
+
+assert.eq( 1 , t.find( { loc : { $near : [50,50] } } ).sort( { _id : 1 } ).next()._id , "B1" )
+assert.eq( 2 , t.find( { loc : { $near : [50,50] } } ).sort( { _id : -1 } ).next()._id , "B2" )
+
+
+t.insert( { _id : 4 , loc : [] } )
+assert.eq( 4 , t.find().itcount() , "C1" )
diff --git a/jstests/geo7.js b/jstests/geo7.js
new file mode 100644
index 0000000..c220da5
--- /dev/null
+++ b/jstests/geo7.js
@@ -0,0 +1,20 @@
+
+t = db.geo7;
+t.drop();
+
+t.insert({_id:1,y:[1,1]})
+t.insert({_id:2,y:[1,1],z:3})
+t.insert({_id:3,y:[1,1],z:4})
+t.insert({_id:4,y:[1,1],z:5})
+
+t.ensureIndex({y:"2d",z:1})
+
+assert.eq( 1 , t.find({y:[1,1],z:3}).itcount() , "A1" );
+
+t.dropIndex({y:"2d",z:1})
+
+t.ensureIndex({y:"2d"})
+assert.eq( 1 , t.find({y:[1,1],z:3}).itcount() , "A2" );
+
+t.insert( { _id : 5 , y : 5 } );
+assert.eq( 5 , t.findOne( { y : 5 } )._id , "B1" );
diff --git a/jstests/geo8.js b/jstests/geo8.js
new file mode 100644
index 0000000..301f3bc
--- /dev/null
+++ b/jstests/geo8.js
@@ -0,0 +1,13 @@
+
+t = db.geo8
+t.drop()
+
+t.insert( { loc : [ 5 , 5 ] } )
+t.insert( { loc : [ 5 , 6 ] } )
+t.insert( { loc : [ 5 , 7 ] } )
+t.insert( { loc : [ 4 , 5 ] } )
+t.insert( { loc : [ 100 , 100 ] } )
+
+t.ensureIndex( { loc : "2d" } )
+
+t.runCommand( "geoWalk" );
diff --git a/jstests/geo9.js b/jstests/geo9.js
new file mode 100644
index 0000000..8b6510f
--- /dev/null
+++ b/jstests/geo9.js
@@ -0,0 +1,28 @@
+
+t = db.geo9
+t.drop();
+
+t.save( { _id : 1 , a : [ 10 , 10 ] , b : [ 50 , 50 ] } )
+t.save( { _id : 2 , a : [ 11 , 11 ] , b : [ 51 , 52 ] } )
+t.save( { _id : 3 , a : [ 12 , 12 ] , b : [ 52 , 52 ] } )
+
+t.save( { _id : 4 , a : [ 50 , 50 ] , b : [ 10 , 10 ] } )
+t.save( { _id : 5 , a : [ 51 , 51 ] , b : [ 11 , 11 ] } )
+t.save( { _id : 6 , a : [ 52 , 52 ] , b : [ 12 , 12 ] } )
+
+t.ensureIndex( { a : "2d" } )
+t.ensureIndex( { b : "2d" } )
+
+function check( field ){
+ var q = {}
+ q[field] = { $near : [ 11 , 11 ] }
+ arr = t.find( q ).limit(3).map(
+ function(z){
+ return Geo.distance( [ 11 , 11 ] , z[field] );
+ }
+ );
+ assert.eq( 2 * Math.sqrt( 2 ) , Array.sum( arr ) , "test " + field );
+}
+
+check( "a" )
+check( "b" )
diff --git a/jstests/geo_box1.js b/jstests/geo_box1.js
new file mode 100644
index 0000000..5ef3351
--- /dev/null
+++ b/jstests/geo_box1.js
@@ -0,0 +1,43 @@
+
+t = db.geo_box1;
+t.drop();
+
+num = 0;
+for ( x=0; x<=20; x++ ){
+ for ( y=0; y<=20; y++ ){
+ o = { _id : num++ , loc : [ x , y ] }
+ t.save( o )
+ }
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+searches = [
+ [ [ 1 , 2 ] , [ 4 , 5 ] ] ,
+ [ [ 1 , 1 ] , [ 2 , 2 ] ] ,
+ [ [ 0 , 2 ] , [ 4 , 5 ] ] ,
+ [ [ 1 , 1 ] , [ 2 , 8 ] ] ,
+];
+
+
+for ( i=0; i<searches.length; i++ ){
+ b = searches[i];
+ //printjson( b );
+
+ q = { loc : { $within : { $box : b } } }
+ numWanted = ( 1 + b[1][0] - b[0][0] ) * ( 1 + b[1][1] - b[0][1] );
+ assert.eq( numWanted , t.find(q).itcount() , "itcount: " + tojson( q ) );
+ printjson( t.find(q).explain() )
+}
+
+
+
+assert.eq( 0 , t.find( { loc : { $within : { $box : [ [100 , 100 ] , [ 110 , 110 ] ] } } } ).itcount() , "E1" )
+assert.eq( 0 , t.find( { loc : { $within : { $box : [ [100 , 100 ] , [ 110 , 110 ] ] } } } ).count() , "E2" )
+
+
+assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).count() , "E3" )
+assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).itcount() , "E4" )
+
+assert.eq( 57 , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).limit(57).itcount() , "E5" )
+
diff --git a/jstests/geo_box2.js b/jstests/geo_box2.js
new file mode 100644
index 0000000..2aa65d0
--- /dev/null
+++ b/jstests/geo_box2.js
@@ -0,0 +1,19 @@
+
+t = db.geo_box2;
+
+t.drop()
+
+for (i=1; i<10; i++) {
+ for(j=1; j<10; j++) {
+ t.insert({loc : [i,j]});
+ }
+}
+
+t.ensureIndex({"loc" : "2d"} )
+assert.eq( 9 , t.find({loc : {$within : {$box : [[4,4],[6,6]]}}}).itcount() , "A1" );
+
+t.dropIndex( { "loc" : "2d" } )
+
+t.ensureIndex({"loc" : "2d"} , {"min" : 0, "max" : 10})
+assert.eq( 9 , t.find({loc : {$within : {$box : [[4,4],[6,6]]}}}).itcount() , "B1" );
+
diff --git a/jstests/geo_circle1.js b/jstests/geo_circle1.js
new file mode 100644
index 0000000..9208511
--- /dev/null
+++ b/jstests/geo_circle1.js
@@ -0,0 +1,50 @@
+
+t = db.geo_circle1;
+t.drop();
+
+searches = [
+ [ [ 5 , 5 ] , 3 ] ,
+ [ [ 5 , 5 ] , 1 ] ,
+ [ [ 5 , 5 ] , 5 ] ,
+ [ [ 0 , 5 ] , 5 ] ,
+];
+correct = searches.map( function(z){ return []; } );
+
+num = 0;
+
+for ( x=0; x<=20; x++ ){
+ for ( y=0; y<=20; y++ ){
+ o = { _id : num++ , loc : [ x , y ] }
+ t.save( o )
+ for ( i=0; i<searches.length; i++ )
+ if ( Geo.distance( [ x , y ] , searches[i][0] ) <= searches[i][1] )
+ correct[i].push( o );
+ }
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+for ( i=0; i<searches.length; i++ ){
+ //print( tojson( searches[i] ) + "\t" + correct[i].length )
+ q = { loc : { $within : { $center : searches[i] } } }
+
+ //correct[i].forEach( printjson )
+ //printjson( q );
+ //t.find( q ).forEach( printjson )
+
+ //printjson( Array.sort( correct[i].map( function(z){ return z._id; } ) ) )
+ //printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
+
+ assert.eq( correct[i].length , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
+ assert.eq( correct[i].length , t.find( q ).count() , "count : " + tojson( searches[i] ) );
+ assert.gt( correct[i].length * 2 , t.find(q).explain().nscanned , "nscanned : " + tojson( searches[i] ) )
+}
+
+
+
+
+
+
+
+
+
diff --git a/jstests/geoa.js b/jstests/geoa.js
new file mode 100644
index 0000000..3081f6c
--- /dev/null
+++ b/jstests/geoa.js
@@ -0,0 +1,12 @@
+
+t = db.geoa
+t.drop();
+
+t.save( { _id : 1 , a : { loc : [ 5 , 5 ] } } )
+t.save( { _id : 2 , a : { loc : [ 6 , 6 ] } } )
+t.save( { _id : 3 , a : { loc : [ 7 , 7 ] } } )
+
+t.ensureIndex( { "a.loc" : "2d" } );
+
+cur = t.find( { "a.loc" : { $near : [ 6 , 6 ] } } );
+assert.eq( 2 , cur.next()._id , "A1" );
diff --git a/jstests/geob.js b/jstests/geob.js
new file mode 100644
index 0000000..0dcc265
--- /dev/null
+++ b/jstests/geob.js
@@ -0,0 +1,35 @@
+var t = db.geob;
+t.drop();
+
+var a = {p: [0, 0]};
+var b = {p: [1, 0]};
+var c = {p: [3, 4]};
+var d = {p: [0, 6]};
+
+t.save(a);
+t.save(b);
+t.save(c);
+t.save(d);
+t.ensureIndex({p: "2d"});
+
+var res = t.runCommand("geoNear", {near: [0,0]});
+assert.close(3, res.stats.avgDistance, "A");
+
+assert.close(0, res.results[0].dis, "B1");
+assert.eq(a._id, res.results[0].obj._id, "B2");
+
+assert.close(1, res.results[1].dis, "C1");
+assert.eq(b._id, res.results[1].obj._id, "C2");
+
+assert.close(5, res.results[2].dis, "D1");
+assert.eq(c._id, res.results[2].obj._id, "D2");
+
+assert.close(6, res.results[3].dis, "E1");
+assert.eq(d._id, res.results[3].obj._id, "E2");
+
+res = t.runCommand("geoNear", {near: [0,0], distanceMultiplier: 2});
+assert.close(6, res.stats.avgDistance, "F");
+assert.close(0, res.results[0].dis, "G");
+assert.close(2, res.results[1].dis, "H");
+assert.close(10, res.results[2].dis, "I");
+assert.close(12, res.results[3].dis, "J");
diff --git a/jstests/geoc.js b/jstests/geoc.js
new file mode 100644
index 0000000..8b01780
--- /dev/null
+++ b/jstests/geoc.js
@@ -0,0 +1,24 @@
+
+t = db.geoc;
+t.drop()
+
+N = 1000;
+
+for (var i=0; i<N; i++) t.insert({loc:[100+Math.random(), 100+Math.random()], z:0})
+for (var i=0; i<N; i++) t.insert({loc:[0+Math.random(), 0+Math.random()], z:1})
+for (var i=0; i<N; i++) t.insert({loc:[-100+Math.random(), -100+Math.random()], z:2})
+
+t.ensureIndex({loc:'2d'})
+
+function test( z , l ){
+ assert.lt( 0 , t.find({loc:{$near:[100,100]}, z:z}).limit(l).itcount() , "z: " + z + " l: " + l );
+}
+
+test( 1 , 1 );
+test( 1 , 2 );
+test( 2 , 2 );
+test( 2 , 10 );
+test( 2 , 1000 );
+test( 2 , 100000 );
+test( 2 , 10000000 );
+
diff --git a/jstests/group2.js b/jstests/group2.js
index f687e88..a8e6653 100644
--- a/jstests/group2.js
+++ b/jstests/group2.js
@@ -28,11 +28,11 @@ delete cmd.key
cmd["$keyf"] = function(x){ return { a : x.a }; };
result2 = t.group( cmd );
-assert.eq( result , result2 );
+assert.eq( result , result2, "check result2" );
delete cmd.$keyf
cmd["keyf"] = function(x){ return { a : x.a }; };
result3 = t.group( cmd );
-assert.eq( result , result3 );
+assert.eq( result , result3, "check result3" );
diff --git a/jstests/group3.js b/jstests/group3.js
index afa32f1..d113b9d 100644
--- a/jstests/group3.js
+++ b/jstests/group3.js
@@ -1,4 +1,4 @@
-t = db.group2;
+t = db.group3;
t.drop();
t.save({a: 1});
diff --git a/jstests/hint1.js b/jstests/hint1.js
index 416eb4a..c222aa3 100644
--- a/jstests/hint1.js
+++ b/jstests/hint1.js
@@ -6,5 +6,5 @@ p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true
p.ensureIndex( { ts: 1 } );
e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: " alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
-assert.eq( e.startKey.ts.getTime(), new Date( 1234119308272 ).getTime() , "A" );
-assert.eq( 0 , e.endKey.ts.getTime() , "B" );
+assert.eq( e.indexBounds[0][0].ts.getTime(), new Date( 1234119308272 ).getTime() , "A" );
+assert.eq( 0 , e.indexBounds[0][1].ts.getTime() , "B" );
diff --git a/jstests/in.js b/jstests/in.js
index 5442bbe..b8ba159 100644
--- a/jstests/in.js
+++ b/jstests/in.js
@@ -17,3 +17,4 @@ assert.eq( 2 , t.find( { a : { $in : [ 1 , 2 ] } } ).itcount() , "F" );
assert.eq( 0 , t.find( { a : { $in : [] } } ).itcount() , "G" );
+assert.eq( 1 , t.find( { a : { $gt: 1, $in : [ 2 ] } } ).itcount() , "E" );
\ No newline at end of file
diff --git a/jstests/in3.js b/jstests/in3.js
new file mode 100644
index 0000000..1ec53ca
--- /dev/null
+++ b/jstests/in3.js
@@ -0,0 +1,11 @@
+t = db.jstests_in3;
+
+t.drop();
+t.ensureIndex( {i:1} );
+assert.eq( [ [ {i:3}, {i:3} ] ], t.find( {i:{$in:[3]}} ).explain().indexBounds , "A1" );
+assert.eq( [ [ {i:3}, {i:3} ], [ {i:6}, {i:6} ] ], t.find( {i:{$in:[3,6]}} ).explain().indexBounds , "A2" );
+
+for ( var i=0; i<20; i++ )
+ t.insert( { i : i } );
+
+assert.eq( 2 , t.find( {i:{$in:[3,6]}} ).explain().nscanned , "B1" )
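
This is the explain format change threaded through the whole patch: startKey/endKey are replaced by explain().indexBounds, an array of [ lower, upper ] pairs with one entry per scanned interval, so a multi-point $in can report every range it visits:

    var e = t.find( { i: { $in: [ 3, 6 ] } } ).explain();
    // e.indexBounds == [ [ {i:3}, {i:3} ], [ {i:6}, {i:6} ] ]
    var lower = e.indexBounds[ 0 ][ 0 ], upper = e.indexBounds[ 0 ][ 1 ];
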
diff --git a/jstests/inc2.js b/jstests/inc2.js
index 8442f14..75a8e65 100644
--- a/jstests/inc2.js
+++ b/jstests/inc2.js
@@ -1,5 +1,5 @@
-t = db.inc1
+t = db.inc2
t.drop();
t.save( { _id : 1 , x : 1 } );
diff --git a/jstests/index10.js b/jstests/index10.js
index 105fcc1..c638264 100644
--- a/jstests/index10.js
+++ b/jstests/index10.js
@@ -13,10 +13,18 @@ t.ensureIndex( {i:1} );
assert.eq( 5, t.count() );
t.dropIndexes();
t.ensureIndex( {i:1}, true );
+err = db.getLastErrorObj();
+assert( err.err );
+assert.eq( 11000, err.code );
assert.eq( 1, db.system.indexes.count( {ns:"test.jstests_index10" } ) ); // only id index
// t.dropIndexes();
+ts = t.totalIndexSize();
t.ensureIndex( {i:1}, [ true, true ] );
+ts2 = t.totalIndexSize();
+
+assert.eq( ts * 2, ts2, "totalIndexSize fail" );
+
assert.eq( 3, t.count() );
assert.eq( 1, t.count( {i:1} ) );
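
A unique-index build over pre-existing duplicates now surfaces duplicate-key error 11000 through getLastErrorObj, which the test checks before retrying with the dropDups form. The detection pattern, condensed:

    t.ensureIndex( { i: 1 }, true );   // unique build against duplicate data
    var err = db.getLastErrorObj();
    if ( err.err && err.code == 11000 )
        print( "unique index build failed: " + err.err );
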
diff --git a/jstests/index7.js b/jstests/index7.js
index cf5050b..a3b88d5 100644
--- a/jstests/index7.js
+++ b/jstests/index7.js
@@ -8,18 +8,16 @@ function noIndex( q ) {
assert( q.explain().cursor.match( /^BasicCursor/ ) , "noIndex assert" );
}
-function start( k, q ) {
- var s = q.explain().startKey;
+function start( k, q, rev) {
+ var s = q.explain().indexBounds[rev?1:0][0];
assert.eq( k.a, s.a );
assert.eq( k.b, s.b );
}
-
-function end( k, q ) {
- var e = q.explain().endKey;
+function end( k, q, rev) {
+ var e = q.explain().indexBounds[rev?1:0][1];
assert.eq( k.a, e.a );
assert.eq( k.b, e.b );
}
-
function both( k, q ) {
start( k, q );
end( k, q );
@@ -35,20 +33,20 @@ noIndex( f.find( { a: 5 } ).sort( { a: 1 } ).hint( { $natural: 1 } ) );
f.drop();
f.ensureIndex( { a: 1, b: 1 } );
-assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().startKey.a );
-assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().endKey.a );
-assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().startKey.a );
-assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().endKey.a );
-assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().startKey.c );
-assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().endKey.c );
+assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][0].a );
+assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][1].a );
+assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][0].a );
+assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][1].a );
+assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][0].c );
+assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][1].c );
start( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).hint( { a: 1, b: 1 } ) );
start( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-start( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
+start( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ), true );
start( { a: "a", b: 1 }, f.find( { b: 1, a: /^a/ } ).hint( { a: 1, b: 1 } ) );
end( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).hint( { a: 1, b: 1 } ) );
end( { a: "b", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-end( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
+end( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ), true );
end( { a: "b", b: 1 }, f.find( { b: 1, a: /^a/ } ).hint( { a: 1, b: 1 } ) );
start( { a: "z", b: 1 }, f.find( { a: /^z/, b: 1 } ).hint( { a: 1, b: 1 } ) );
diff --git a/jstests/index8.js b/jstests/index8.js
index 09a0645..719ad2d 100644
--- a/jstests/index8.js
+++ b/jstests/index8.js
@@ -9,11 +9,14 @@ t.ensureIndex( { c: 1 }, [ false, "cIndex" ] );
checkIndexes = function( num ) {
// printjson( db.system.indexes.find( { ns: "test.jstests_index8" } ).toArray() );
- indexes = db.system.indexes.find( { ns: "test.jstests_index8" } ).sort( { key: 1 } );
- assert( !indexes[ 0 ].unique , "A" + num );
- assert( indexes[ 1 ].unique , "B" + num );
- assert( !indexes[ 2 ].unique , "C" + num );
- assert.eq( "cIndex", indexes[ 2 ].name , "D" + num );
+ indexes = db.system.indexes.find( { ns: "test.jstests_index8" } ).sort( { key: 1 } ).toArray();
+ var start = 0;
+ if ( indexes[0].name == "_id_" )
+ start = 1;
+ assert( !indexes[ start ].unique , "A" + num );
+ assert( indexes[ start + 1 ].unique , "B" + num + " " + tojson( indexes[start+1] ) );
+ assert( !indexes[ start + 2 ].unique , "C" + num );
+ assert.eq( "cIndex", indexes[ start + 2 ].name , "D" + num );
}
checkIndexes( 1 );
diff --git a/jstests/index_check2.js b/jstests/index_check2.js
index 56796ac..a489fd6 100644
--- a/jstests/index_check2.js
+++ b/jstests/index_check2.js
@@ -38,4 +38,4 @@ scanned3 = t.find(q3).explain().nscanned;
assert( scanned3 <= Math.max( scanned1 , scanned2 ) , "$all makes query optimizer not work well" );
exp3 = t.find( q3 ).explain();
-assert.eq( exp3.startKey, exp3.endKey, "$all range not a single key" );
+assert.eq( exp3.indexBounds[0][0], exp3.indexBounds[0][1], "$all range not a single key" );
diff --git a/jstests/index_diag.js b/jstests/index_diag.js
new file mode 100644
index 0000000..38169b3
--- /dev/null
+++ b/jstests/index_diag.js
@@ -0,0 +1,38 @@
+
+t = db.index_diag
+t.drop();
+
+t.ensureIndex( { x : 1 } );
+
+all = []
+ids = []
+xs = []
+
+function r( a ){
+ var n = []
+ for ( var x=a.length-1; x>=0; x-- )
+ n.push( a[x] );
+ return n;
+}
+
+for ( i=1; i<4; i++ ){
+ o = { _id : i , x : -i }
+ t.insert( o );
+ all.push( o );
+ ids.push( { _id : i } );
+ xs.push( { x : -i } );
+}
+
+assert.eq( all , t.find().sort( { _id : 1 } ).toArray() , "A1" );
+assert.eq( r( all ) , t.find().sort( { _id : -1 } ).toArray() , "A2" );
+
+assert.eq( all , t.find().sort( { x : -1 } ).toArray() , "A3" );
+assert.eq( r( all ) , t.find().sort( { x : 1 } ).toArray() , "A4" );
+
+assert.eq( ids , t.find().sort( { _id : 1 } )._addSpecial( "$returnKey" , true ).toArray() , "B1" )
+assert.eq( r( ids ) , t.find().sort( { _id : -1 } )._addSpecial( "$returnKey" , true ).toArray() , "B2" )
+assert.eq( xs , t.find().sort( { x : -1 } )._addSpecial( "$returnKey" , true ).toArray() , "B3" )
+assert.eq( r( xs ) , t.find().sort( {x : 1 } )._addSpecial( "$returnKey" , true ).toArray() , "B4" )
+
+assert.eq( r( xs ) , t.find().hint( { x : 1 } )._addSpecial( "$returnKey" , true ).toArray() , "B5" )
+
diff --git a/jstests/indexg.js b/jstests/indexg.js
new file mode 100644
index 0000000..a0709fd
--- /dev/null
+++ b/jstests/indexg.js
@@ -0,0 +1,13 @@
+
+f = db.jstests_indexg;
+f.drop();
+f.save( { list: [1, 2] } );
+f.save( { list: [1, 3] } );
+
+doit = function() {
+ assert.eq( 1, f.count( { list: { $in: [1], $ne: 3 } } ) );
+ assert.eq( 1, f.count( { list: { $in: [1], $not:{$in: [3] } } } ) );
+}
+doit();
+f.ensureIndex( { list: 1 } );
+doit();
\ No newline at end of file
diff --git a/jstests/insert1.js b/jstests/insert1.js
new file mode 100644
index 0000000..76edca1
--- /dev/null
+++ b/jstests/insert1.js
@@ -0,0 +1,41 @@
+t = db.insert1;
+t.drop();
+
+o = {a:1};
+t.insert(o);
+id = t._lastID
+assert.eq(o, {a:1}, "input unchanged 1");
+assert.eq(typeof(id), "object", "1");
+assert.eq(id.constructor, ObjectId, "1");
+assert.eq(t.findOne({_id:id}).a, 1, "find by id 1");
+assert.eq(t.findOne({a:1})._id, id , "find by val 1");
+
+o = {a:2, _id:new ObjectId()};
+id1 = o._id
+t.insert(o);
+id2 = t._lastID
+assert.eq(id1, id2, "ids match 2");
+assert.eq(o, {a:2, _id:id1}, "input unchanged 2");
+assert.eq(typeof(id2), "object", "2");
+assert.eq(id2.constructor, ObjectId, "2");
+assert.eq(t.findOne({_id:id1}).a, 2, "find by id 2");
+assert.eq(t.findOne({a:2})._id, id1 , "find by val 2");
+
+o = {a:3, _id:"asdf"};
+id1 = o._id
+t.insert(o);
+id2 = t._lastID
+assert.eq(id1, id2, "ids match 3");
+assert.eq(o, {a:3, _id:id1}, "input unchanged 3");
+assert.eq(typeof(id2), "string", "3");
+assert.eq(t.findOne({_id:id1}).a, 3, "find by id 3");
+assert.eq(t.findOne({a:3})._id, id1 , "find by val 3");
+
+o = {a:4, _id:null};
+id1 = o._id
+t.insert(o);
+id2 = t._lastID
+assert.eq(id1, id2, "ids match 4");
+assert.eq(o, {a:4, _id:id1}, "input unchanged 4");
+assert.eq(t.findOne({_id:id1}).a, 4, "find by id 4");
+assert.eq(t.findOne({a:4})._id, id1 , "find by val 4");
diff --git a/jstests/json1.js b/jstests/json1.js
index a3dc820..e045df7 100644
--- a/jstests/json1.js
+++ b/jstests/json1.js
@@ -5,8 +5,8 @@ assert.eq( tojson( x ) , tojson( y ) , "A" );
assert.eq( typeof( x.nulls ) , typeof( y.nulls ) , "B" );
// each type is parsed properly
-x = {"x" : null, "y" : true, "z" : 123, "w" : "foo"};
-assert.eq(tojson(x,"",false), '{\n\t"x" : null,\n\t"y" : true,\n\t"z" : 123,\n\t"w" : "foo"\n}' , "C" );
+x = {"x" : null, "y" : true, "z" : 123, "w" : "foo", "a": undefined};
+assert.eq(tojson(x,"",false), '{\n\t"x" : null,\n\t"y" : true,\n\t"z" : 123,\n\t"w" : "foo",\n\t"a" : undefined\n}' , "C" );
x = {"x" : [], "y" : {}};
assert.eq(tojson(x,"",false), '{\n\t"x" : [ ],\n\t"y" : {\n\t\t\n\t}\n}' , "D" );
diff --git a/jstests/mod1.js b/jstests/mod1.js
index eca35b7..46e3482 100644
--- a/jstests/mod1.js
+++ b/jstests/mod1.js
@@ -22,3 +22,4 @@ assert.eq( 1 , t.find( "this.a % 10 == 0" ).itcount() , "B3" );
assert.eq( 1 , t.find( { a : { $mod : [ 10 , 0 ] } } ).itcount() , "B4" );
assert.eq( 4 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain().nscanned , "B5" );
+assert.eq( 1, t.find( { a: { $gt: 5, $mod : [ 10, 1 ] } } ).itcount() );
\ No newline at end of file
diff --git a/jstests/mr5.js b/jstests/mr5.js
index 50eb366..bbac3fe 100644
--- a/jstests/mr5.js
+++ b/jstests/mr5.js
@@ -26,12 +26,31 @@ r = function( k , v ){
}
res = t.mapReduce( m , r , { scope : { xx : 1 } } );
-res.find().forEach( printjson )
+//res.find().forEach( printjson )
z = res.convertToSingleObject()
-assert.eq( 2 , Object.keySet( z ).length , "A" )
-assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "B" )
-assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "B" )
+assert.eq( 2 , Object.keySet( z ).length , "A1" )
+assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "A2" )
+assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "A3" )
+
+
+res.drop()
+
+m = function(){
+ var x = "partner";
+ var y = "visits";
+ emit( this[x] , { stats : [ this[y] ] } )
+}
+
+
+
+res = t.mapReduce( m , r , { scope : { xx : 1 } } );
+//res.find().forEach( printjson )
+
+z = res.convertToSingleObject()
+assert.eq( 2 , Object.keySet( z ).length , "B1" )
+assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "B2" )
+assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "B3" )
res.drop()
diff --git a/jstests/mr_bigobject.js b/jstests/mr_bigobject.js
new file mode 100644
index 0000000..8224209
--- /dev/null
+++ b/jstests/mr_bigobject.js
@@ -0,0 +1,41 @@
+
+t = db.mr_bigobject
+t.drop()
+
+s = "";
+while ( s.length < ( 1024 * 1024 ) ){
+ s += "asdasdasd";
+}
+
+for ( i=0; i<10; i++ )
+ t.insert( { _id : i , s : s } )
+
+m = function(){
+ emit( 1 , this.s + this.s );
+}
+
+r = function( k , v ){
+ return 1;
+}
+
+assert.throws( function(){ t.mapReduce( m , r ); } , "emit should fail" )
+
+m = function(){
+ emit( 1 , this.s );
+}
+
+assert.eq( { 1 : 1 } , t.mapReduce( m , r ).convertToSingleObject() , "A1" )
+
+r = function( k , v ){
+ total = 0;
+ for ( var i=0; i<v.length; i++ ){
+ var x = v[i];
+ if ( typeof( x ) == "number" )
+ total += x
+ else
+ total += x.length;
+ }
+ return total;
+}
+
+assert.eq( { 1 : 10 * s.length } , t.mapReduce( m , r ).convertToSingleObject() , "A2" )
diff --git a/jstests/mr_errorhandling.js b/jstests/mr_errorhandling.js
new file mode 100644
index 0000000..57724f1
--- /dev/null
+++ b/jstests/mr_errorhandling.js
@@ -0,0 +1,47 @@
+
+t = db.mr_errorhandling;
+t.drop();
+
+t.save( { a : [ 1 , 2 , 3 ] } )
+t.save( { a : [ 2 , 3 , 4 ] } )
+
+m_good = function(){
+ for ( var i=0; i<this.a.length; i++ ){
+ emit( this.a[i] , 1 );
+ }
+}
+
+m_bad = function(){
+ for ( var i=0; i<this.a.length; i++ ){
+ emit( this.a[i] );
+ }
+}
+
+r = function( k , v ){
+ var total = 0;
+ for ( var i=0; i<v.length; i++ )
+ total += v[i];
+ return total;
+}
+
+res = t.mapReduce( m_good , r );
+assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A" );
+res.drop()
+
+res = null;
+
+theerror = null;
+try {
+ res = t.mapReduce( m_bad , r );
+}
+catch ( e ){
+ theerror = e.toString();
+}
+assert.isnull( res , "B1" );
+assert( theerror , "B2" );
+assert( theerror.indexOf( "emit" ) >= 0 , "B3" );
+
+// test things are still in an ok state
+res = t.mapReduce( m_good , r );
+assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A" );
+res.drop()
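
An emit() called without a value aborts the whole mapReduce and reaches the shell as an exception whose text names emit; the second m_good run then confirms the server is still healthy afterwards. The catch pattern, condensed:

    var res = null;
    try {
        res = t.mapReduce( function(){ emit( this.a[ 0 ] ); },   // bad: no value argument
                           function( k, v ){ return 1; } );
    } catch ( e ) {
        assert( e.toString().indexOf( "emit" ) >= 0, "unexpected error: " + e );
    }
    assert.isnull( res );
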
diff --git a/jstests/nin.js b/jstests/nin.js
index 4afd344..0658278 100644
--- a/jstests/nin.js
+++ b/jstests/nin.js
@@ -29,6 +29,7 @@ doTest = function( n ) {
assert.eq( 4, t.find( { a: { $nin: [ 9 ] } } ).count() , n + " G" );
assert.eq( 4, t.find( { a: { $nin: [ 3 ] } } ).count() , n + " H" );
assert.eq( 3, t.find( { a: { $nin: [ 2, 3 ] } } ).count() , n + " I" );
+ assert.eq( 1, t.find( { a: { $ne: 8, $nin: [ 2, 3 ] } } ).count() , n + " I2" );
checkEqual( n + " A" , "a" , 5 );
@@ -43,7 +44,7 @@ doTest = function( n ) {
checkEqual( n + " C" , "a.b" , 5 );
assert.eq( 7, t.find( { 'a.b': { $nin: [ 10 ] } } ).count() , n + " L" );
- assert.eq( 8, t.find( { 'a.b': { $nin: [ [ 10, 11 ] ] } } ).count() , n + " M" );
+ assert.eq( 7, t.find( { 'a.b': { $nin: [ [ 10, 11 ] ] } } ).count() , n + " M" );
assert.eq( 7, t.find( { a: { $nin: [ 11 ] } } ).count() , n + " N" );
t.save( { a: { b: [ 20, 30 ] } } );
diff --git a/jstests/not2.js b/jstests/not2.js
new file mode 100644
index 0000000..5d33baa
--- /dev/null
+++ b/jstests/not2.js
@@ -0,0 +1,139 @@
+t = db.jstests_not2;
+t.drop();
+
+check = function( query, expected, size ) {
+ if ( size == null ) {
+ size = 1;
+ }
+ assert.eq( size, t.count( query ), tojson( query ) );
+ if ( size > 0 ) {
+ assert.eq( expected, t.findOne( query ).i, tojson( query ) );
+ }
+}
+
+fail = function( query ) {
+ try {
+ t.count( query );
+ } catch ( e ) {
+ }
+ assert( db.getLastError(), tojson( query ) );
+}
+
+doTest = function() {
+
+t.remove( {} );
+
+t.save( {i:"a"} );
+t.save( {i:"b"} );
+
+fail( {i:{$not:"a"}} );
+fail( {i:{$not:{$not:{$gt:"a"}}}} );
+fail( {i:{$not:{$ref:"foo"}}} );
+fail( {i:{$not:{}}} );
+check( {i:{$gt:"a"}}, "b" );
+check( {i:{$not:{$gt:"a"}}}, "a" );
+check( {i:{$not:{$ne:"a"}}}, "a" );
+check( {i:{$not:{$gte:"b"}}}, "a" );
+check( {i:{$exists:true}}, "a", 2 );
+check( {i:{$not:{$exists:true}}}, "", 0 );
+check( {j:{$not:{$exists:false}}}, "", 0 );
+check( {j:{$not:{$exists:true}}}, "a", 2 );
+check( {i:{$not:{$in:["a"]}}}, "b" );
+check( {i:{$not:{$in:["a", "b"]}}}, "", 0 );
+check( {i:{$not:{$in:["g"]}}}, "a", 2 );
+check( {i:{$not:{$nin:["a"]}}}, "a" );
+check( {i:{$not:/a/}}, "b" );
+check( {i:{$not:/(a|b)/}}, "", 0 );
+check( {i:{$not:/a/,$regex:"a"}}, "", 0 );
+check( {i:{$not:/aa/}}, "a", 2 );
+fail( {i:{$not:{$regex:"a"}}} );
+fail( {i:{$not:{$options:"a"}}} );
+check( {i:{$type:2}}, "a", 2 );
+check( {i:{$not:{$type:1}}}, "a", 2 );
+check( {i:{$not:{$type:2}}}, "", 0 );
+
+check( {i:{$not:{$gt:"c",$lt:"b"}}}, "b" );
+
+t.remove( {} );
+t.save( {i:1} );
+check( {i:{$not:{$mod:[5,1]}}}, null, 0 );
+check( {i:{$mod:[5,2]}}, null, 0 );
+check( {i:{$not:{$mod:[5,2]}}}, 1, 1 );
+
+t.remove( {} );
+t.save( {i:["a","b"]} );
+check( {i:{$not:{$size:2}}}, null, 0 );
+check( {i:{$not:{$size:3}}}, ["a","b"] );
+check( {i:{$not:{$gt:"a"}}}, null, 0 );
+check( {i:{$not:{$gt:"c"}}}, ["a","b"] );
+check( {i:{$not:{$all:["a","b"]}}}, null, 0 );
+check( {i:{$not:{$all:["c"]}}}, ["a","b"] );
+
+t.remove( {} );
+t.save( {i:{j:"a"}} );
+t.save( {i:{j:"b"}} );
+check( {i:{$not:{$elemMatch:{j:"a"}}}}, {j:"b"} );
+check( {i:{$not:{$elemMatch:{j:"f"}}}}, {j:"a"}, 2 );
+
+}
+
+doTest();
+t.ensureIndex( {i:1} );
+doTest();
+
+t.drop();
+t.save( {i:"a"} );
+t.save( {i:"b"} );
+t.ensureIndex( {i:1} );
+
+indexed = function( query, min, max ) {
+ exp = t.find( query ).explain();
+// printjson( exp );
+ assert( exp.cursor.match( /Btree/ ), tojson( query ) );
+ assert( exp.allPlans.length == 1, tojson( query ) );
+ // just expecting one element per key
+ for( i in exp.indexBounds[0][0] ) {
+ assert.eq( exp.indexBounds[0][0][ i ], min );
+ }
+ for( i in exp.indexBounds[0][1] ) {
+ assert.eq( exp.indexBounds[0][1][ i ], max );
+ }
+}
+
+not = function( query ) {
+ exp = t.find( query ).explain();
+// printjson( exp );
+ assert( !exp.cursor.match( /Btree/ ), tojson( query ) );
+ assert( exp.allPlans.length == 1, tojson( query ) );
+}
+
+indexed( {i:1}, 1, 1 );
+not( {i:{$ne:1}} );
+
+indexed( {i:{$not:{$ne:"a"}}}, "a", "a" );
+not( {i:{$not:/^a/}} );
+
+indexed( {i:{$gt:"a"}}, "a", {} );
+indexed( {i:{$not:{$gt:"a"}}}, "", "a" );
+
+indexed( {i:{$gte:"a"}}, "a", {} );
+indexed( {i:{$not:{$gte:"a"}}}, "", "a" );
+
+indexed( {i:{$lt:"b"}}, "", "b" );
+indexed( {i:{$not:{$lt:"b"}}}, "b", {} );
+
+indexed( {i:{$lte:"b"}}, "", "b" );
+indexed( {i:{$not:{$lte:"b"}}}, "b", {} );
+
+not( {i:{$not:{$all:["a"]}}} );
+not( {i:{$not:{$mod:[2,1]}}} );
+not( {i:{$not:{$type:2}}} );
+
+indexed( {i:{$in:[1]}}, 1, 1 );
+not( {i:{$not:{$in:[1]}}} );
+
+t.drop();
+t.ensureIndex( {"i.j":1} );
+indexed( {i:{$elemMatch:{j:1}}}, 1, 1 );
+not( {i:{$not:{$elemMatch:{j:1}}}} );
+indexed( {i:{$not:{$elemMatch:{j:{$ne:1}}}}}, 1, 1 );
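
The indexed/not helpers capture the rule the tests enforce: $not inverts an operator's index range when the inversion is still a single interval (so {$not:{$gt:"a"}} scans [ "", "a" ]), while forms it cannot invert ($ne, $not on regex, $all, $mod, $type, $in) fall back to a full scan:

    var e = t.find( { i: { $not: { $gt: "a" } } } ).explain();
    // Btree cursor; e.indexBounds[0] == [ {i:""}, {i:"a"} ]
    var e2 = t.find( { i: { $ne: 1 } } ).explain();
    // BasicCursor: the negation covers two disjoint ranges and is not indexed
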
diff --git a/jstests/parallel/basic.js b/jstests/parallel/basic.js
index 9c10306..bcb4d65 100644
--- a/jstests/parallel/basic.js
+++ b/jstests/parallel/basic.js
@@ -9,3 +9,8 @@ for( i in params ) {
}
t.run( "one or more tests failed", true );
+
+db.getCollectionNames().forEach( function( x ) {
+ v = db[ x ].validate();
+ assert( v.valid, "validate failed for " + x + " with " + tojson( v ) );
+ } );
\ No newline at end of file
diff --git a/jstests/parallel/basicPlus.js b/jstests/parallel/basicPlus.js
index d6f9a4d..4d65d25 100644
--- a/jstests/parallel/basicPlus.js
+++ b/jstests/parallel/basicPlus.js
@@ -23,4 +23,8 @@ for( var i = 4; i < 8; ++i ) {
t.run( "one or more tests failed", true );
-assert( c.validate().valid, "validate failed" );
\ No newline at end of file
+assert( c.validate().valid, "validate failed" );
+db.getCollectionNames().forEach( function( x ) {
+ v = db[ x ].validate();
+ assert( v.valid, "validate failed for " + x + " with " + tojson( v ) );
+ } );
\ No newline at end of file
diff --git a/jstests/parallel/repl.js b/jstests/parallel/repl.js
new file mode 100644
index 0000000..cb9b770
--- /dev/null
+++ b/jstests/parallel/repl.js
@@ -0,0 +1,55 @@
+// test all operations in parallel
+
+baseName = "parallel_repl"
+
+rt = new ReplTest( baseName );
+
+m = rt.start( true );
+s = rt.start( false );
+
+db = m.getDB( "test" );
+
+Random.setRandomSeed();
+
+t = new ParallelTester();
+
+for( id = 0; id < 10; ++id ) {
+ var g = new EventGenerator( id, baseName, Random.randInt( 20 ) );
+ for( var j = 0; j < 1000; ++j ) {
+ var op = Random.randInt( 3 );
+ switch( op ) {
+ case 0: // insert
+ g.addInsert( { _id:Random.randInt( 1000 ) } );
+ break;
+ case 1: // remove
+ g.addRemove( { _id:Random.randInt( 1000 ) } );
+ break;
+ case 2: // update
+ g.addUpdate( {_id:{$lt:1000}}, {$inc:{a:5}} );
+ break;
+ default:
+ assert( false, "Invalid op code" );
+ }
+ }
+ t.add( EventGenerator.dispatch, g.getEvents() );
+}
+
+var g = new EventGenerator( id, baseName, Random.randInt( 5 ) );
+for( var j = 1000; j < 3000; ++j ) {
+ g.addCheckCount( j - 1000, { _id: {$gte:1000} }, j % 100 == 0, j % 500 == 0 );
+ g.addInsert( {_id:j} );
+}
+t.add( EventGenerator.dispatch, g.getEvents() );
+
+t.run( "one or more tests failed" );
+
+assert( m.getDB( "test" )[ baseName ].validate().valid );
+assert( s.getDB( "test" )[ baseName ].validate().valid );
+
+assert.soon( function() {
+ mh = m.getDB( "test" ).runCommand( "dbhash" );
+// printjson( mh );
+ sh = s.getDB( "test" ).runCommand( "dbhash" );
+// printjson( sh );
+ return mh.md5 == sh.md5;
+ } );
diff --git a/jstests/profile1.js b/jstests/profile1.js
index ea53b09..49f6838 100644
--- a/jstests/profile1.js
+++ b/jstests/profile1.js
@@ -13,6 +13,8 @@ var capped_size = db.system.profile.storageSize();
assert.gt(capped_size, 999, "D");
assert.lt(capped_size, 2000, "E");
+db.foo.findOne()
+
assert.eq( 4 , db.system.profile.find().count() , "E2" );
/* Make sure we can't drop if profiling is still on */
diff --git a/jstests/pullall.js b/jstests/pullall.js
index b720ce5..76b1b47 100644
--- a/jstests/pullall.js
+++ b/jstests/pullall.js
@@ -1,4 +1,4 @@
-t = db.jstests_pushall;
+t = db.jstests_pullall;
t.drop();
t.save( { a: [ 1, 2, 3 ] } );
diff --git a/jstests/regex4.js b/jstests/regex4.js
index 568c937..fc26d69 100644
--- a/jstests/regex4.js
+++ b/jstests/regex4.js
@@ -1,5 +1,5 @@
-t = db.regex3;
+t = db.regex4;
t.drop();
t.save( { name : "eliot" } );
diff --git a/jstests/regex5.js b/jstests/regex5.js
index 7fe39d5..418752b 100644
--- a/jstests/regex5.js
+++ b/jstests/regex5.js
@@ -2,12 +2,46 @@
t = db.regex5
t.drop()
-t.save( { x : [ "abc" , "xyz" ] } )
-t.save( { x : [ "ac" , "xyz" ] } )
+t.save( { x : [ "abc" , "xyz1" ] } )
+t.save( { x : [ "ac" , "xyz2" ] } )
a = /.*b.*c/
x = /.*y.*/
-assert.eq( 1 , t.find( { x : a } ).count() , "A" )
-assert.eq( 2 , t.find( { x : x } ).count() , "B" )
-// assert.eq( 1 , t.find( { x : { $all : [ a , x ] } } ).count() , "C" ) // SERVER-505
+doit = function() {
+
+ assert.eq( 1 , t.find( { x : a } ).count() , "A" );
+ assert.eq( 2 , t.find( { x : x } ).count() , "B" );
+ assert.eq( 2 , t.find( { x : { $in: [ x ] } } ).count() , "C" ); // SERVER-322
+ assert.eq( 1 , t.find( { x : { $in: [ a, "xyz1" ] } } ).count() , "D" ); // SERVER-322
+ assert.eq( 2 , t.find( { x : { $in: [ a, "xyz2" ] } } ).count() , "E" ); // SERVER-322
+ assert.eq( 1 , t.find( { x : { $all : [ a , x ] } } ).count() , "F" ); // SERVER-505
+ assert.eq( 1 , t.find( { x : { $all : [ a , "abc" ] } } ).count() , "G" ); // SERVER-505
+ assert.eq( 0 , t.find( { x : { $all : [ a , "ac" ] } } ).count() , "H" ); // SERVER-505
+ assert.eq( 0 , t.find( { x : { $nin: [ x ] } } ).count() , "I" ); // SERVER-322
+ assert.eq( 1 , t.find( { x : { $nin: [ a, "xyz1" ] } } ).count() , "J" ); // SERVER-322
+ assert.eq( 0 , t.find( { x : { $nin: [ a, "xyz2" ] } } ).count() , "K" ); // SERVER-322
+ assert.eq( 2 , t.find( { x : { $not: { $nin: [ x ] } } } ).count() , "L" ); // SERVER-322
+ assert.eq( 1 , t.find( { x : { $nin: [ /^a.c/ ] } } ).count() , "M" ) // SERVER-322
+}
+
+doit();
+t.ensureIndex( {x:1} );
+print( "now indexed" );
+doit();
+
+// check bound unions SERVER-322
+assert.eq( [
+ [ {x:1},{x:1} ],
+ [ {x:2.5},{x:2.5} ],
+ [ {x:"a"},{x:"a"} ],
+ [ {x:"b"},{x:"e"} ],
+ [ {x:/^b/},{x:/^b/} ],
+ [ {x:/^c/},{x:/^c/} ],
+ [ {x:/^d/},{x:/^d/} ]
+ ],
+ t.find( { x : { $in: [ 1, 2.5, "a", "b", /^b/, /^c/, /^d/ ] } } ).explain().indexBounds );
+
+// SERVER-505
+assert.eq( [ [ {x:"a"}, {x:"a"} ] ], t.find( { x : { $all: [ "a", /^a/ ] } } ).explain().indexBounds );
+assert.eq( [ [ {x:"a"}, {x:"b"} ] ], t.find( { x : { $all: [ /^a/ ] } } ).explain().indexBounds );
diff --git a/jstests/regex6.js b/jstests/regex6.js
index d25367c..12ed85b 100644
--- a/jstests/regex6.js
+++ b/jstests/regex6.js
@@ -10,10 +10,13 @@ t.save( { name : "aaron" } );
t.ensureIndex( { name : 1 } );
assert.eq( 0 , t.find( { name : /^\// } ).count() , "index count" );
-assert.eq( 0 , t.find( { name : /^\// } ).explain().nscanned , "index explain" );
-assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain" );
-assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain" );
-assert.eq( 0 , t.find( { name : /^\./ } ).explain().nscanned , "index explain" );
-assert.eq( 4 , t.find( { name : /^./ } ).explain().nscanned , "index explain" );
+assert.eq( 0 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1" );
+assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain 2" );
+assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain 3" );
+assert.eq( 0 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" );
+assert.eq( 4 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
-assert.eq( 4 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain" );
+assert.eq( 4 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
+
+assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain().nscanned , "ie7" );
+assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain().nscanned , "ie7" );
diff --git a/jstests/regex7.js b/jstests/regex7.js
new file mode 100644
index 0000000..ab4f608
--- /dev/null
+++ b/jstests/regex7.js
@@ -0,0 +1,26 @@
+t = db.regex_matches_self;
+t.drop();
+
+t.insert({r:/^a/});
+t.insert({r:/^a/i});
+t.insert({r:/^b/});
+
+// no index
+assert.eq( /^a/, t.findOne({r:/^a/}).r, '1 1 a')
+assert.eq( 1, t.count({r:/^a/}), '1 2')
+assert.eq( /^a/i, t.findOne({r:/^a/i}).r, '2 1 a')
+assert.eq( 1, t.count({r:/^a/i}), '2 2 a')
+assert.eq( /^b/, t.findOne({r:/^b/}).r, '3 1 a')
+assert.eq( 1, t.count({r:/^b/}), '3 2 a')
+
+// with index
+t.ensureIndex({r:1})
+assert.eq( /^a/, t.findOne({r:/^a/}).r, '1 1 b')
+assert.eq( 1, t.count({r:/^a/}), '1 2 b')
+assert.eq( /^a/i, t.findOne({r:/^a/i}).r, '2 1 b')
+assert.eq( 1, t.count({r:/^a/i}), '2 2 b')
+assert.eq( /^b/, t.findOne({r:/^b/}).r, '3 1 b')
+assert.eq( 1, t.count({r:/^b/}), '3 2 b')
+
+t.insert( {r:"a"} );
+assert.eq( 2, t.count({r:/^a/}), 'c' ); \ No newline at end of file
diff --git a/jstests/regex8.js b/jstests/regex8.js
new file mode 100644
index 0000000..33dd74f
--- /dev/null
+++ b/jstests/regex8.js
@@ -0,0 +1,19 @@
+
+t = db.regex8;
+t.drop()
+
+t.insert( { _id : 1 , a : "abc" } )
+t.insert( { _id : 2 , a : "abc" } )
+t.insert( { _id : 3 , a : "bdc" } )
+
+function test( msg ){
+ assert.eq( 3 , t.find().itcount() , msg + "1" )
+ assert.eq( 2 , t.find( { a : /a.*/ } ).itcount() , msg + "2" )
+ assert.eq( 3 , t.find( { a : /[ab].*/ } ).itcount() , msg + "3" )
+ assert.eq( 3 , t.find( { a : /[a|b].*/ } ).itcount() , msg + "4" )
+}
+
+test( "A" );
+
+t.ensureIndex( { a : 1 } )
+test( "B" )
diff --git a/jstests/regex9.js b/jstests/regex9.js
new file mode 100644
index 0000000..559efd9
--- /dev/null
+++ b/jstests/regex9.js
@@ -0,0 +1,11 @@
+
+t = db.regex9;
+t.drop();
+
+t.insert( { _id : 1 , a : [ "a" , "b" , "c" ] } )
+t.insert( { _id : 2 , a : [ "a" , "b" , "c" , "d" ] } )
+t.insert( { _id : 3 , a : [ "b" , "c" , "d" ] } )
+
+assert.eq( 2 , t.find( { a : /a/ } ).itcount() , "A1" )
+assert.eq( 2 , t.find( { a : { $regex : "a" } } ).itcount() , "A2" )
+assert.eq( 2 , t.find( { a : { $regex : /a/ } } ).itcount() , "A3" )
diff --git a/jstests/regex_embed1.js b/jstests/regex_embed1.js
new file mode 100644
index 0000000..61b1b9a
--- /dev/null
+++ b/jstests/regex_embed1.js
@@ -0,0 +1,25 @@
+
+t = db.regex_embed1
+
+t.drop()
+
+t.insert( { _id : 1 , a : [ { x : "abc" } , { x : "def" } ] } )
+t.insert( { _id : 2 , a : [ { x : "ab" } , { x : "de" } ] } )
+t.insert( { _id : 3 , a : [ { x : "ab" } , { x : "de" } , { x : "abc" } ] } )
+
+function test( m ){
+ assert.eq( 3 , t.find().itcount() , m + "1" );
+ assert.eq( 2 , t.find( { "a.x" : "abc" } ).itcount() , m + "2" );
+ assert.eq( 2 , t.find( { "a.x" : /.*abc.*/ } ).itcount() , m + "3" );
+
+ assert.eq( 1 , t.find( { "a.0.x" : "abc" } ).itcount() , m + "4" );
+ assert.eq( 1 , t.find( { "a.0.x" : /abc/ } ).itcount() , m + "5" );
+}
+
+test( "A" );
+
+t.ensureIndex( { "a.x" : 1 } )
+test( "B" );
+
+
+
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 9668a91..e0acf5c 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -52,6 +52,51 @@ check( "B" );
am.a.update( {} , { $inc : { x : 1 } } , false , true );
check( "C" );
+// ----- check features -------
+
+// map/reduce
+am.mr.insert( { tags : [ "a" ] } )
+am.mr.insert( { tags : [ "a" , "b" ] } )
+am.getLastError();
+check( "mr setup" );
+
+m = function(){
+ for ( var i=0; i<this.tags.length; i++ ){
+ print( "\t " + i );
+ emit( this.tags[i] , 1 );
+ }
+}
+
+r = function( key , v ){
+ return Array.sum( v );
+}
+
+correct = { a : 2 , b : 1 };
+
+function checkMR( t ){
+ var res = t.mapReduce( m , r );
+ assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
+}
+
+function checkNumCollections( msg , diff ){
+ if ( ! diff ) diff = 0;
+ var m = am.getCollectionNames();
+ var s = as.getCollectionNames();
+ assert.eq( m.length + diff , s.length , "lengths bad \n" + tojson( m ) + "\n" + tojson( s ) );
+}
+
+checkNumCollections( "MR1" );
+checkMR( am.mr );
+checkMR( as.mr );
+checkNumCollections( "MR2" );
+
+sleep( 3000 );
+checkNumCollections( "MR3" );
+
+var res = am.mr.mapReduce( m , r , { out : "xyz" } );
+sleep( 3000 );
+checkNumCollections( "MR4" );
+
rt.stop();
diff --git a/jstests/repl/master1.js b/jstests/repl/master1.js
new file mode 100644
index 0000000..9f021fc
--- /dev/null
+++ b/jstests/repl/master1.js
@@ -0,0 +1,49 @@
+// Test handling of clock skew and optimes across mongod instances
+
+var baseName = "jstests_repl_master1test";
+
+oplog = function() {
+ return m.getDB( "local" ).oplog.$main;
+}
+
+lastop = function() {
+ return oplog().find().sort( {$natural:-1} ).next();
+}
+
+am = function() {
+ return m.getDB( baseName ).a;
+}
+
+rt = new ReplTest( baseName );
+
+m = rt.start( true );
+
+am().save( {} );
+assert.eq( "i", lastop().op );
+
+op = lastop();
+printjson( op );
+op.ts.t = op.ts.t + 600000 // 10 minutes
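+// godinsert writes the doctored op directly into the oplog, bypassing normal op logging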
+m.getDB( "local" ).runCommand( {godinsert:"oplog.$main", obj:op} );
+
+rt.stop( true );
+m = rt.start( true, null, true );
+
+assert.eq( op.ts.t, lastop().ts.t );
+am().save( {} );
+assert.eq( op.ts.t, lastop().ts.t );
+assert.eq( op.ts.i + 1, lastop().ts.i );
+
+op = lastop();
+printjson( op );
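+// bump the increment to 2^31; after restart the next write should make the server
+// notice the bogus optime and exit with EXIT_CLOCK_SKEW (asserted below)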
+op.ts.i = Math.pow(2,31);
+printjson( op );
+m.getDB( "local" ).runCommand( {godinsert:"oplog.$main", obj:op} );
+
+rt.stop( true );
+m = rt.start( true, null, true );
+assert.eq( op.ts.i, lastop().ts.i );
+am().save( {} );
+sleep( 3000 ); // make sure it dies on its own before stop() is called
+
+assert.eq( 47 /*EXIT_CLOCK_SKEW*/, rt.stop( true ) ); \ No newline at end of file
diff --git a/jstests/repl/pair1.js b/jstests/repl/pair1.js
index 7004048..b8b7ffd 100644
--- a/jstests/repl/pair1.js
+++ b/jstests/repl/pair1.js
@@ -34,6 +34,7 @@ check = function( s ) {
assert.soon( function() {
return 1 == getCount( s );
} );
+ sleep( 500 ); // wait for sync clone to finish up
}
// check that slave reads and writes are guarded
diff --git a/jstests/repl/pair3.js b/jstests/repl/pair3.js
index 506e173..d1cf99a 100644
--- a/jstests/repl/pair3.js
+++ b/jstests/repl/pair3.js
@@ -226,6 +226,8 @@ doTest4 = function( signal ) {
// now can only talk to arbiter
pair.start( true );
pair.waitForSteadyState( [ 1, 1 ], null, true );
+
+ ports.forEach( function( x ) { stopMongoProgram( x ); } );
}
doTest1();
diff --git a/jstests/repl/pair4.js b/jstests/repl/pair4.js
index 5a59c16..c04433e 100644
--- a/jstests/repl/pair4.js
+++ b/jstests/repl/pair4.js
@@ -134,6 +134,7 @@ doTest = function( recover, newMaster, newSlave ) {
}
+// right will be master on recovery b/c both sides will have completed initial sync
debug( "basic test" );
doTest( function() {
connect();
diff --git a/jstests/repl/pair5.js b/jstests/repl/pair5.js
index ed8c72d..de7e2d5 100644
--- a/jstests/repl/pair5.js
+++ b/jstests/repl/pair5.js
@@ -62,7 +62,7 @@ doTest = function( nSlave, opIdMem ) {
disconnect();
pair.waitForSteadyState( [ 1, 1 ], null, true );
- // left will become slave
+ // left will become slave (b/c both completed initial sync)
for( i = 0; i < nSlave; ++i ) {
write( pair.left(), i, i );
}
diff --git a/jstests/repl/pair7.js b/jstests/repl/pair7.js
new file mode 100644
index 0000000..52ef91f
--- /dev/null
+++ b/jstests/repl/pair7.js
@@ -0,0 +1,85 @@
+// pairing with auth
+
+var baseName = "jstests_pair7test";
+
+setAdmin = function( n ) {
+ n.getDB( "admin" ).addUser( "super", "super" );
+ n.getDB( "local" ).addUser( "repl", "foo" );
+ n.getDB( "local" ).system.users.findOne();
+}
+
+auth = function( n ) {
+ return n.getDB( baseName ).auth( "test", "test" );
+}
+
+doTest = function( signal ) {
+
+ ports = allocatePorts( 3 );
+
+ m = startMongod( "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "-left", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ setAdmin( m );
+ stopMongod( ports[ 1 ] );
+
+ m = startMongod( "--port", ports[ 2 ], "--dbpath", "/data/db/" + baseName + "-right", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ setAdmin( m );
+ stopMongod( ports[ 2 ] );
+
+ a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
+ l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] );
+ r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] );
+
+ rp = new ReplPair( l, r, a );
+ rp.start( true );
+ rp.waitForSteadyState();
+
+ rp.master().getDB( "admin" ).auth( "super", "super" );
+ rp.master().getDB( baseName ).addUser( "test", "test" );
+ auth( rp.master() ); // reauth
+ assert.soon( function() { return auth( rp.slave() ); } );
+ rp.slave().setSlaveOk();
+
+ ma = rp.master().getDB( baseName ).a;
+ ma.save( {} );
+ sa = rp.slave().getDB( baseName ).a;
+ assert.soon( function() { return 1 == sa.count(); } );
+
+ rp.killNode( rp.slave(), signal );
+ rp.waitForSteadyState( [ 1, null ] );
+ ma.save( {} );
+
+ rp.start( true );
+ rp.waitForSteadyState();
+ assert.soon( function() { return auth( rp.slave() ); } );
+ rp.slave().setSlaveOk();
+ sa = rp.slave().getDB( baseName ).a;
+ assert.soon( function() { return 2 == sa.count(); } );
+
+ ma.save( {a:1} );
+ assert.soon( function() { return 1 == sa.count( {a:1} ); } );
+
+ ma.update( {a:1}, {b:2} );
+ assert.soon( function() { return 1 == sa.count( {b:2} ); } );
+
+ ma.remove( {b:2} );
+ assert.soon( function() { return 0 == sa.count( {b:2} ); } );
+
+ rp.killNode( rp.master(), signal );
+ rp.waitForSteadyState( [ 1, null ] );
+ ma = sa;
+ ma.save( {} );
+
+ rp.start( true );
+ rp.waitForSteadyState();
+ assert.soon( function() { return auth( rp.slave() ); } );
+ rp.slave().setSlaveOk();
+ sa = rp.slave().getDB( baseName ).a;
+ assert.soon( function() { return 3 == sa.count(); } );
+
+ ma.save( {} );
+ assert.soon( function() { return 4 == sa.count(); } );
+
+ ports.forEach( function( x ) { stopMongod( x ); } );
+}
+
+doTest( 15 ); // SIGTERM
+doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/repl10.js b/jstests/repl/repl10.js
new file mode 100644
index 0000000..67c5db1
--- /dev/null
+++ b/jstests/repl/repl10.js
@@ -0,0 +1,38 @@
+// Test slave delay
+
+var baseName = "jstests_repl10test";
+
+soonCount = function( count ) {
+ assert.soon( function() {
+ // print( "check count" );
+ // print( "count: " + s.getDB( baseName ).z.find().count() );
+ return s.getDB( baseName ).a.find().count() == count;
+ } );
+}
+
+doTest = function( signal ) {
+
+ rt = new ReplTest( "repl10tests" );
+
+ m = rt.start( true );
+ s = rt.start( false, { "slavedelay": "10" } );
+
+ am = m.getDB( baseName ).a
+
+ am.save( {i:1} );
+
+ soonCount( 1 );
+
+ am.save( {i:2} );
+ assert.eq( 2, am.count() );
+ sleep( 3000 );
+
+ rt.stop( true, signal );
+ sleep( 3000 );
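+ // only the first save should have replicated; the second is still inside the delay window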
+ assert.eq( 1, s.getDB( baseName ).a.count() );
+
+ rt.stop();
+}
+
+doTest( 15 ); // SIGTERM
+doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/repl11.js b/jstests/repl/repl11.js
new file mode 100644
index 0000000..c5c63b3
--- /dev/null
+++ b/jstests/repl/repl11.js
@@ -0,0 +1,59 @@
+// Test repl with auth enabled
+
+var baseName = "jstests_repl11test";
+
+setAdmin = function( n ) {
+ n.getDB( "admin" ).addUser( "super", "super" );
+ n.getDB( "local" ).addUser( "repl", "foo" );
+ n.getDB( "local" ).system.users.findOne();
+}
+
+auth = function( n ) {
+ return n.getDB( baseName ).auth( "test", "test" );
+}
+
+doTest = function( signal ) {
+
+ rt = new ReplTest( baseName );
+
+ m = rt.start( true, {}, false, true );
+ m.getDB( baseName ).addUser( "test", "test" );
+ setAdmin( m );
+ rt.stop( true );
+
+ s = rt.start( false, {}, false, true );
+ setAdmin( s );
+ rt.stop( false );
+
+ m = rt.start( true, { auth:null }, true );
+ auth( m );
+ s = rt.start( false, { auth:null }, true );
+ assert.soon( function() { return auth( s ); } );
+
+ ma = m.getDB( baseName ).a;
+ ma.save( {} );
+ sa = s.getDB( baseName ).a;
+ assert.soon( function() { return 1 == sa.count(); } );
+
+ rt.stop( false, signal );
+
+ ma.save( {} );
+ s = rt.start( false, { auth:null }, true );
+ assert.soon( function() { return auth( s ); } );
+ sa = s.getDB( baseName ).a;
+ assert.soon( function() { return 2 == sa.count(); } );
+
+ ma.save( {a:1} );
+ assert.soon( function() { return 1 == sa.count( {a:1} ); } );
+
+ ma.update( {a:1}, {b:2} );
+ assert.soon( function() { return 1 == sa.count( {b:2} ); } );
+
+ ma.remove( {b:2} );
+ assert.soon( function() { return 0 == sa.count( {b:2} ); } );
+
+ rt.stop();
+}
+
+doTest( 15 ); // SIGTERM
+doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/repl4.js b/jstests/repl/repl4.js
index de7ca43..56719b7 100644
--- a/jstests/repl/repl4.js
+++ b/jstests/repl/repl4.js
@@ -25,6 +25,14 @@ doTest = function() {
printjson( s.getDBNames() );
assert.eq( -1, s.getDBNames().indexOf( "b" ) );
assert.eq( 0, s.getDB( "b" ).b.find().count() );
+
+ rt.stop( false );
+
+ cm.save( { x:3 } );
+ bm.save( { x:4 } );
+
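+ // restart the slave replicating only db "c" and verify the write made while it was down syncs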
+ s = rt.start( false, { only: "c" }, true );
+ soonCount( "c", "c", 2 );
}
doTest();
diff --git a/jstests/repl/replacePeer1.js b/jstests/repl/replacePeer1.js
index 45ee544..b3743ce 100644
--- a/jstests/repl/replacePeer1.js
+++ b/jstests/repl/replacePeer1.js
@@ -38,7 +38,8 @@ doTest = function( signal ) {
rp = new ReplPair( l, r, a );
rp.start();
- rp.waitForSteadyState( [ 1, 0 ], rp.right().host );
+ rp.waitForSteadyState( [ 1, 0 ] );
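+ // remember which node won the election so the survivor's data dir is preserved when its peer is replaced below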
+ rightMaster = ( rp.master().host == rp.right().host );
checkWrite( rp.master(), rp.slave() );
@@ -51,16 +52,26 @@ doTest = function( signal ) {
rp.killNode( rp.master(), signal );
rp.killNode( rp.arbiter(), signal );
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+ if ( rightMaster ) {
+ o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
+ r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+ rp = new ReplPair( o, r, a );
+ resetDbpath( "/data/db/" + baseName + "-left" );
+ } else {
+ l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+ o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+ rp = new ReplPair( l, o, a );
+ resetDbpath( "/data/db/" + baseName + "-right" );
+ }
- rp = new ReplPair( o, r, a );
- resetDbpath( "/data/db/" + baseName + "-left" );
rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ], rp.right().host );
+ rp.waitForSteadyState( [ 1, 0 ] );
- checkWrite( rp.master(), rp.slave() );
rp.slave().setSlaveOk();
+ assert.eq( 2, rp.master().getDB( baseName ).z.find().toArray().length );
+ assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length );
+
+ checkWrite( rp.master(), rp.slave() );
assert.eq( 3, rp.slave().getDB( baseName ).z.find().toArray().length );
ports.forEach( function( x ) { stopMongod( x ); } );
diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js
index 09c8177..f519b17 100644
--- a/jstests/repl/replacePeer2.js
+++ b/jstests/repl/replacePeer2.js
@@ -38,8 +38,9 @@ doTest = function( signal ) {
rp = new ReplPair( l, r, a );
rp.start();
- rp.waitForSteadyState( [ 1, 0 ], rp.right().host );
-
+ rp.waitForSteadyState( [ 1, 0 ] );
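+ // remember which node became slave so the correct data dir is reset when it is replaced below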
+ leftSlave = ( rp.slave().host == rp.left().host );
+
checkWrite( rp.master(), rp.slave() );
// allow slave to finish initial sync
@@ -52,16 +53,26 @@ doTest = function( signal ) {
ports.forEach( function( x ) { stopMongod( x, signal ); } );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, o, a );
- resetDbpath( "/data/db/" + baseName + "-right" );
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ], rp.left().host );
+ if ( leftSlave ) {
+ l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+ o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+ rp = new ReplPair( l, o, a );
+ resetDbpath( "/data/db/" + baseName + "-right" );
+ } else {
+ o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
+ r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+ rp = new ReplPair( o, r, a );
+ resetDbpath( "/data/db/" + baseName + "-left" );
+ }
- checkWrite( rp.master(), rp.slave() );
+ rp.start( true );
+ rp.waitForSteadyState( [ 1, 0 ] );
+
rp.slave().setSlaveOk();
+ assert.eq( 1, rp.slave().getDB( baseName ).z.find().toArray().length );
+ assert.eq( 1, rp.master().getDB( baseName ).z.find().toArray().length );
+
+ checkWrite( rp.master(), rp.slave() );
assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length );
ports.forEach( function( x ) { stopMongod( x ); } );
diff --git a/jstests/repl/snapshot1.js b/jstests/repl/snapshot1.js
new file mode 100644
index 0000000..3be37aa
--- /dev/null
+++ b/jstests/repl/snapshot1.js
@@ -0,0 +1,34 @@
+// Test SERVER-623 - starting slave from a new snapshot
+
+ports = allocatePorts( 3 );
+
+var baseName = "repl_snapshot1";
+
+rt1 = new ReplTest( "repl_snapshot1-1", [ ports[ 0 ], ports[ 1 ] ] );
+rt2 = new ReplTest( "repl_snapshot1-2", [ ports[ 0 ], ports[ 2 ] ] );
+m = rt1.start( true );
+
+big = new Array( 2000 ).toString();
+for( i = 0; i < 1000; ++i )
+ m.getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
+
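+// fsync+lock flushes and blocks writes so the data files can be copied as a consistent snapshot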
+m.getDB( "admin" ).runCommand( {fsync:1,lock:1} );
+copyDbpath( rt1.getPath( true ), rt1.getPath( false ) );
+m.getDB( "admin" ).$cmd.sys.unlock.findOne();
+
+s1 = rt1.start( false, null, true );
+assert.eq( 1000, s1.getDB( baseName )[ baseName ].count() );
+m.getDB( baseName )[ baseName ].save( {i:1000} );
+assert.soon( function() { return 1001 == s1.getDB( baseName )[ baseName ].count(); } );
+
+s1.getDB( "admin" ).runCommand( {fsync:1,lock:1} );
+copyDbpath( rt1.getPath( false ), rt2.getPath( false ) );
+s1.getDB( "admin" ).$cmd.sys.unlock.findOne();
+
+s2 = rt2.start( false, null, true );
+assert.eq( 1001, s2.getDB( baseName )[ baseName ].count() );
+m.getDB( baseName )[ baseName ].save( {i:1001} );
+assert.soon( function() { return 1002 == s2.getDB( baseName )[ baseName ].count(); } );
+assert.soon( function() { return 1002 == s1.getDB( baseName )[ baseName ].count(); } );
+
+assert( !rawMongoProgramOutput().match( /resync/ ) ); \ No newline at end of file
diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js
new file mode 100644
index 0000000..4ebd786
--- /dev/null
+++ b/jstests/repl/snapshot2.js
@@ -0,0 +1,50 @@
+// Test SERVER-623 - starting repl peer from a new snapshot of master
+
+ports = allocatePorts( 3 );
+
+var baseName = "repl_snapshot2";
+var basePath = "/data/db/" + baseName;
+
+a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
+l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+
+rp = new ReplPair( l, r, a );
+rp.start();
+rp.waitForSteadyState();
+
+big = new Array( 2000 ).toString();
+rp.slave().setSlaveOk();
+for( i = 0; i < 1000; ++i ) {
+ rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
+ if ( i % 250 == 249 ) {
+ assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+ }
+}
+
+rp.master().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
+leftMaster = ( rp.master().host == rp.left().host );
+rp.killNode( rp.slave() );
+if ( leftMaster ) {
+ copyDbpath( basePath + "-left", basePath + "-right" );
+} else {
+ copyDbpath( basePath + "-right", basePath + "-left" );
+}
+rp.master().getDB( "admin" ).$cmd.sys.unlock.findOne();
+rp.killNode( rp.master() );
+
+clearRawMongoProgramOutput();
+
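+// --fastsync makes each node trust the copied data files instead of resyncing from scratch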
+rp.right_.extraArgs_ = [ "--fastsync" ];
+rp.left_.extraArgs_ = [ "--fastsync" ];
+
+rp.start( true );
+rp.waitForSteadyState();
+assert.eq( 1000, rp.master().getDB( baseName )[ baseName ].count() );
+rp.slave().setSlaveOk();
+assert.eq( 1000, rp.slave().getDB( baseName )[ baseName ].count() );
+rp.master().getDB( baseName )[ baseName ].save( {i:1000} );
+assert.soon( function() { return 1001 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+
+assert( !rawMongoProgramOutput().match( /resync/ ) );
+assert( !rawMongoProgramOutput().match( /SyncException/ ) ); \ No newline at end of file
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
new file mode 100644
index 0000000..5380bbf
--- /dev/null
+++ b/jstests/repl/snapshot3.js
@@ -0,0 +1,50 @@
+// Test SERVER-623 - starting repl peer from a new snapshot of slave
+
+ports = allocatePorts( 3 );
+
+var baseName = "repl_snapshot2";
+var basePath = "/data/db/" + baseName;
+
+a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
+l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
+r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+
+rp = new ReplPair( l, r, a );
+rp.start();
+rp.waitForSteadyState();
+
+big = new Array( 2000 ).toString();
+rp.slave().setSlaveOk();
+for( i = 0; i < 1000; ++i ) {
+ rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
+ if ( i % 250 == 249 ) {
+ assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+ }
+}
+
+rp.slave().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
+leftSlave = ( rp.slave().host == rp.left().host );
+rp.killNode( rp.master() );
+if ( leftSlave ) {
+ copyDbpath( basePath + "-left", basePath + "-right" );
+} else {
+ copyDbpath( basePath + "-right", basePath + "-left" );
+}
+rp.slave().getDB( "admin" ).$cmd.sys.unlock.findOne();
+rp.killNode( rp.slave() );
+
+clearRawMongoProgramOutput();
+
+rp.right_.extraArgs_ = [ "--fastsync" ];
+rp.left_.extraArgs_ = [ "--fastsync" ];
+
+rp.start( true );
+rp.waitForSteadyState();
+assert.eq( 1000, rp.master().getDB( baseName )[ baseName ].count() );
+rp.slave().setSlaveOk();
+assert.eq( 1000, rp.slave().getDB( baseName )[ baseName ].count() );
+rp.master().getDB( baseName )[ baseName ].save( {i:1000} );
+assert.soon( function() { return 1001 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+
+assert( !rawMongoProgramOutput().match( /resync/ ) );
+assert( !rawMongoProgramOutput().match( /SyncException/ ) ); \ No newline at end of file
diff --git a/jstests/run_program1.js b/jstests/run_program1.js
new file mode 100644
index 0000000..7a994b2
--- /dev/null
+++ b/jstests/run_program1.js
@@ -0,0 +1,19 @@
+if ( ! _isWindows() ) {
+
+ // note that normal program exit returns 0
+ assert.eq (0, runProgram('true'))
+ assert.neq(0, runProgram('false'))
+ assert.neq(0, runProgram('this_program_doesnt_exist'));
+
+ //verify output visually
+ runProgram('echo', 'Hello', 'World.', 'How are you?');
+ runProgram('bash', '-c', 'echo Hello World. "How are you?"'); // only one space is printed between Hello and World
+
+ // numbers can be passed as numbers or strings
+ runProgram('sleep', 0.5);
+ runProgram('sleep', '0.5');
+
+} else {
+
+ runProgram('cmd', '/c', 'echo hello windows');
+}
diff --git a/jstests/set5.js b/jstests/set5.js
new file mode 100644
index 0000000..10f26ad
--- /dev/null
+++ b/jstests/set5.js
@@ -0,0 +1,17 @@
+
+t = db.set5;
+t.drop();
+
+function check( want , err ){
+ var x = t.findOne();
+ delete x._id;
+ assert.eq( want , x , err );
+}
+
+t.update( { a : 5 } , { $set : { a : 6 , b : null } } , true );
+check( { a : 6 , b : null } , "A" )
+
+t.drop();
+
+t.update( { z : 5 } , { $set : { z : 6 , b : null } } , true );
+check( { b : null , z : 6 } , "B" )
diff --git a/jstests/set6.js b/jstests/set6.js
new file mode 100644
index 0000000..d41e7ab
--- /dev/null
+++ b/jstests/set6.js
@@ -0,0 +1,20 @@
+
+t = db.set6;
+t.drop();
+
+x = { _id : 1 , r : new DBRef( "foo" , new ObjectId() ) }
+t.insert( x )
+assert.eq( x , t.findOne() , "A" );
+
+x.r.$id = new ObjectId()
+t.update({}, { $set : { r : x.r } } );
+assert.eq( x , t.findOne() , "B");
+
+x.r2 = new DBRef( "foo2" , 5 )
+t.update( {} , { $set : { "r2" : x.r2 } } );
+assert.eq( x , t.findOne() , "C" )
+
+x.r.$id = 2;
+t.update( {} , { $set : { "r.$id" : 2 } } )
+assert.eq( x.r.$id , t.findOne().r.$id , "D");
+
diff --git a/jstests/set7.js b/jstests/set7.js
new file mode 100644
index 0000000..b46fe9e
--- /dev/null
+++ b/jstests/set7.js
@@ -0,0 +1,40 @@
+// test $set with array indices
+
+t = db.jstests_set7;
+
+t.drop();
+
+t.save( {a:[0,1,2,3]} );
+t.update( {}, {$set:{"a.0":2}} );
+assert.eq( [2,1,2,3], t.findOne().a );
+
+t.update( {}, {$set:{"a.4":5}} );
+assert.eq( [2,1,2,3,5], t.findOne().a );
+
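+// $set past the end of the array fills the intervening slots with null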
+t.update( {}, {$set:{"a.9":9}} );
+assert.eq( [2,1,2,3,5,null,null,null,null,9], t.findOne().a );
+
+t.drop();
+t.save( {a:[0,1,2,3]} );
+t.update( {}, {$set:{"a.9":9,"a.7":7}} );
+assert.eq( [0,1,2,3,null,null,null,7,null,9], t.findOne().a );
+
+t.drop();
+t.save( {a:[0,1,2,3,4,5,6,7,8,9,10]} );
+t.update( {}, {$set:{"a.11":11} } );
+assert.eq( [0,1,2,3,4,5,6,7,8,9,10,11], t.findOne().a );
+
+t.drop();
+t.save( {} );
+t.update( {}, {$set:{"a.0":4}} );
+assert.eq( {"0":4}, t.findOne().a );
+
+t.drop();
+t.update( {"a.0":4}, {$set:{b:1}}, true );
+assert.eq( {"0":4}, t.findOne().a );
+
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.f":1}} );
+assert( db.getLastError() );
+assert.eq( [], t.findOne().a );
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
new file mode 100644
index 0000000..774701f
--- /dev/null
+++ b/jstests/sharding/findandmodify1.js
@@ -0,0 +1,57 @@
+s = new ShardingTest( "find_and_modify_sharded" , 2 );
+
+s.adminCommand( { enablesharding : "test" } );
+db = s.getDB( "test" );
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+numObjs = 20;
+
+s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
+
+for (var i=0; i < numObjs; i++){
+ db.stuff.insert({_id: i});
+}
+
+for (var i=0; i < numObjs; i+=2){
+ s.adminCommand( { split: "test.stuff" , middle : {_id: i} } );
+}
+
+for (var i=0; i < numObjs; i+=4){
+ s.adminCommand( { movechunk : "test.stuff" , find : {_id: i} , to : seconday.getMongo().name } );
+}
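+// chunks are now split and spread across both shards, so the findAndModify calls below must route correctly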
+
+//sorted update
+for (var i=0; i < numObjs; i++){
+ assert.eq(db.stuff.count({a:1}), i, "1 A");
+
+ var out = db.stuff.findAndModify({query: {a:null}, update: {$set: {a:1}}, sort: {_id:1}});
+
+ assert.eq(db.stuff.count({a:1}), i+1, "1 B");
+ assert.eq(db.stuff.findOne({_id:i}).a, 1, "1 C");
+ assert.eq(out._id, i, "1 D");
+}
+
+// unsorted update
+for (var i=0; i < numObjs; i++){
+ assert.eq(db.stuff.count({b:1}), i, "2 A");
+
+ var out = db.stuff.findAndModify({query: {b:null}, update: {$set: {b:1}}});
+
+ assert.eq(db.stuff.count({b:1}), i+1, "2 B");
+ assert.eq(db.stuff.findOne({_id:out._id}).b, 1, "2 C");
+}
+
+//sorted remove (no query)
+for (var i=0; i < numObjs; i++){
+ assert.eq(db.stuff.count(), numObjs - i, "3 A");
+ assert.eq(db.stuff.count({_id: i}), 1, "3 B");
+
+ var out = db.stuff.findAndModify({remove: true, sort: {_id:1}});
+
+ assert.eq(db.stuff.count(), numObjs - i - 1, "3 C");
+ assert.eq(db.stuff.count({_id: i}), 0, "3 D");
+ assert.eq(out._id, i, "3 E");
+}
+
+s.stop();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 43e7cc5..d1644ac 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,13 +1,14 @@
// key_many.js
// values have to be sorted
-types =
- [ { name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } ,
- { name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } ,
- { name : "string_id" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "_id" },
- { name : "embedded" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } ,
- { name : "embedded 2" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b.c" } ,
- { name : "object" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" } ,
+types = [
+ { name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } ,
+ { name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } ,
+ { name : "date" , values : [ new Date( 1000000 ) , new Date( 2000000 ) , new Date( 3000000 ) , new Date( 4000000 ) , new Date( 5000000 ) , new Date( 6000000 ) ] , keyfield : "a" } ,
+ { name : "string_id" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "_id" },
+ { name : "embedded" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } ,
+ { name : "embedded 2" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b.c" } ,
+ { name : "object" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" } ,
]
s = new ShardingTest( "key_many" , 2 );
diff --git a/jstests/sharding/moveshard1.js b/jstests/sharding/moveshard1.js
index b074b4c..9220983 100644
--- a/jstests/sharding/moveshard1.js
+++ b/jstests/sharding/moveshard1.js
@@ -16,8 +16,8 @@ assert.eq( ldb.things.count() , 3 );
assert.eq( rdb.things.count() , 0 );
startResult = l.getDB( "admin" ).runCommand( { "movechunk.start" : "foo.things" ,
- "to" : s._serverNames[1] ,
- "from" : s._serverNames[0] ,
+ "to" : s._connections[1].name ,
+ "from" : s._connections[0].name ,
filter : { a : { $gt : 2 } }
} );
print( "movechunk.start: " + tojson( startResult ) );
@@ -25,7 +25,7 @@ assert( startResult.ok == 1 , "start failed!" );
finishResult = l.getDB( "admin" ).runCommand( { "movechunk.finish" : "foo.things" ,
finishToken : startResult.finishToken ,
- to : s._serverNames[1] ,
+ to : s._connections[1].name ,
newVersion : 1 } );
print( "movechunk.finish: " + tojson( finishResult ) );
assert( finishResult.ok == 1 , "finishResult failed!" );
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 566a0db..5932210 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -140,7 +140,7 @@ function countCursor( c ){
return num;
}
assert.eq( 6 , countCursor( db.foo.find()._exec() ) , "getMore 2" );
-assert.eq( 6 , countCursor( db.foo.find().limit(1)._exec() ) , "getMore 3" );
+assert.eq( 6 , countCursor( db.foo.find().batchSize(1)._exec() ) , "getMore 3" );
// find by non-shard-key
db.foo.find().forEach(
diff --git a/jstests/sharding/sync1.js b/jstests/sharding/sync1.js
new file mode 100644
index 0000000..905b488
--- /dev/null
+++ b/jstests/sharding/sync1.js
@@ -0,0 +1,21 @@
+
+test = new SyncCCTest( "sync1" )
+
+db = test.conn.getDB( "test" )
+t = db.sync1
+t.save( { x : 1 } )
+assert.eq( 1 , t.find().itcount() , "A1" );
+assert.eq( 1 , t.find().count() , "A2" );
+t.save( { x : 2 } )
+assert.eq( 2 , t.find().itcount() , "A3" );
+assert.eq( 2 , t.find().count() , "A4" );
+
+test.checkHashes( "test" , "A3" );
+
+test.tempKill();
+assert.throws( function(){ t.save( { x : 3 } ) } , "B1" )
+assert.eq( 2 , t.find().itcount() , "B2" );
+test.tempStart();
+test.checkHashes( "test" , "B3" );
+
+test.stop();
diff --git a/jstests/sharding/sync2.js b/jstests/sharding/sync2.js
new file mode 100644
index 0000000..b0bbcb6
--- /dev/null
+++ b/jstests/sharding/sync2.js
@@ -0,0 +1,48 @@
+// sync2.js
+
+s = new ShardingTest( "sync2" , 3 , 50 , 2 , { sync : true } );
+
+s2 = s._mongos[1];
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s.getDB( "test" ).foo.save( { num : 1 } );
+s.getDB( "test" ).foo.save( { num : 2 } );
+s.getDB( "test" ).foo.save( { num : 3 } );
+s.getDB( "test" ).foo.save( { num : 4 } );
+s.getDB( "test" ).foo.save( { num : 5 } );
+s.getDB( "test" ).foo.save( { num : 6 } );
+s.getDB( "test" ).foo.save( { num : 7 } );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getFirstOther( s.getServer( "test" ) ).name } );
+
+assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
+assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
+assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
+ s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.printChunks();
+
+print( "* A" );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 2" );
+print( "* B" );
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 3" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 4" );
+
+for ( var i=0; i<10; i++ ){
+ print( "* C " + i );
+ assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B " + i );
+}
+
+s.stop();
diff --git a/jstests/shellkillop.js b/jstests/shellkillop.js
new file mode 100644
index 0000000..e8a9763
--- /dev/null
+++ b/jstests/shellkillop.js
@@ -0,0 +1,18 @@
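+// killing the shell mid-update should also terminate the update it was running on the server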
+baseName = "jstests_shellkillop";
+
+db[ baseName ].drop();
+
+for( i = 0; i < 100000; ++i ) {
+ db[ baseName ].save( {i:1} );
+}
+assert.eq( 100000, db[ baseName ].count() );
+
+spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "db." + baseName + ".update( {}, {$set:{i:\"abcdefghijkl\"}}, false, true ); db." + baseName + ".count();" );
+sleep( 100 );
+stopMongoProgramByPid( spawn );
+sleep( 100 );
+inprog = db.currentOp().inprog
+printjson( inprog );
+for( i in inprog ) {
+ assert( inprog[ i ].ns != "test." + baseName, "still running op" );
+}
diff --git a/jstests/shellspawn.js b/jstests/shellspawn.js
index ea2b671..5b0de6b 100644
--- a/jstests/shellspawn.js
+++ b/jstests/shellspawn.js
@@ -6,13 +6,13 @@ if ( typeof( _startMongoProgram ) == "undefined" ){
print( "no fork support" );
}
else {
- spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "sleep( 2000 ); db.getCollection( \"" + baseName + "\" ).save( {a:1} );" );
+ spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "sleep( 2000 ); db.getCollection( '" + baseName + "' ).save( {a:1} );" );
assert.soon( function() { return 1 == t.count(); } );
stopMongoProgramByPid( spawn );
- spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "print( \"I am a shell\" );" );
+ spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "print( 'I am a shell' );" );
spawn = startMongoProgramNoConnect( "mongo", "--port", myPort() );
@@ -21,4 +21,4 @@ else {
stopMongoProgramByPid( spawn );
// all these shells should be killed
-} \ No newline at end of file
+}
diff --git a/jstests/slow/indexbg1.js b/jstests/slow/indexbg1.js
new file mode 100644
index 0000000..5e34d44
--- /dev/null
+++ b/jstests/slow/indexbg1.js
@@ -0,0 +1,117 @@
+// Test background index creation
+
+parallel = function() {
+ return db[ baseName + "_parallelStatus" ];
+}
+
+resetParallel = function() {
+ parallel().drop();
+}
+
+doParallel = function(work) {
+ resetParallel();
+ print("doParallel: " + work);
+ startMongoProgramNoConnect("mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host);
+}
+
+doneParallel = function() {
+ return !!parallel().findOne();
+}
+
+waitParallel = function() {
+ assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
+}
+
+// waiting on SERVER-620
+
+print( "index11.js host:" );
+print( db.getMongo().host );
+
+if (1) {
+
+size = 500000;
+while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print( "size: " + size );
+ baseName = "jstests_index11";
+ fullName = "db." + baseName;
+ t = db[ baseName ];
+ t.drop();
+
+ db.eval( function( size ) {
+ for( i = 0; i < size; ++i ) {
+ db.jstests_index11.save( {i:i} );
+ }
+ },
+ size );
+ assert.eq( size, t.count() );
+
+ doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" );
+ try {
+ // wait for indexing to start
+ print("wait for indexing to start");
+ assert.soon( function() { return 2 == db.system.indexes.count( {ns:"test."+baseName} ) }, "no index created", 30000, 50 );
+ print("started.");
+ assert.eq( size, t.count() );
+ assert.eq( 100, t.findOne( {i:100} ).i );
+ q = t.find();
+ for( i = 0; i < 120; ++i ) { // getmore
+ q.next();
+ assert( q.hasNext(), "no next" );
+ }
+ assert.eq( "BasicCursor", t.find( {i:100} ).explain().cursor, "used btree cursor" );
+ t.remove( {i:40} );
+ t.update( {i:10}, {i:-10} );
+ id = t.find().hint( {$natural:-1} )._id;
+ t.update( {_id:id}, {i:-2} );
+ t.save( {i:-50} );
+ t.save( {i:size+2} );
+ assert( !db.getLastError() );
+
+ print("calling ensureIndex");
+ t.ensureIndex( {i:1} );
+
+ printjson( db.getLastError() );
+ assert( db.getLastError() );
+ assert.eq( size + 1, t.count() );
+ assert( !db.getLastError() );
+
+ print("calling dropIndex");
+ t.dropIndex( {i:1} );
+ printjson( db.getLastError() );
+ assert( db.getLastError() );
+ } catch( e ) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ print("caught exception");
+ sleep( 1000 );
+ if ( !doneParallel() ) {
+ throw e;
+ }
+ print("but that's OK")
+ }
+ if ( !doneParallel() ) {
+ break;
+ }
+ print( "indexing finished too soon, retrying..." );
+ size *= 2;
+ assert( size < 20000000, "unable to run checks in parallel with index creation" );
+}
+
+print("our tests done, waiting for parallel to finish");
+waitParallel();
+print("finished");
+
+assert.eq( "BtreeCursor i_1", t.find( {i:100} ).explain().cursor );
+assert.eq( 1, t.count( {i:-10} ) );
+assert.eq( 1, t.count( {i:-2} ) );
+assert.eq( 1, t.count( {i:-50} ) );
+assert.eq( 1, t.count( {i:size+2} ) );
+assert.eq( 0, t.count( {i:40} ) );
+assert( !db.getLastError() );
+print("about to drop index");
+t.dropIndex( {i:1} );
+printjson( db.getLastError() );
+assert( !db.getLastError() );
+
+} // if 1
+
diff --git a/jstests/slow/indexbg2.js b/jstests/slow/indexbg2.js
new file mode 100644
index 0000000..1830f42
--- /dev/null
+++ b/jstests/slow/indexbg2.js
@@ -0,0 +1,83 @@
+// Test background index creation w/ constraints
+
+parallel = function() {
+ return db[ baseName + "_parallelStatus" ];
+}
+
+resetParallel = function() {
+ parallel().drop();
+}
+
+doParallel = function( work ) {
+ resetParallel();
+ startMongoProgramNoConnect( "mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host );
+}
+
+doneParallel = function() {
+ return !!parallel().findOne();
+}
+
+waitParallel = function() {
+ assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
+}
+
+doTest = function(dropDups) {
+
+ size = 10000;
+ while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
+ baseName = "jstests_index12";
+ fullName = "db." + baseName;
+ t = db[baseName];
+ t.drop();
+
+ db.eval(function(size) {
+ for (i = 0; i < size; ++i) {
+ db.jstests_index12.save({ i: i });
+ }
+ },
+ size);
+ assert.eq(size, t.count());
+
+ doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true, dropDups:" + dropDups + "} )");
+ try {
+ // wait for indexing to start
+ assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }) }, "no index created", 30000, 50);
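+ // insert keys that duplicate existing ones while the unique index is still building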
+ t.save({ i: 0, n: true });
+ //printjson(db.getLastError());
+ t.save({ i: size - 1, n: true });
+ //printjson(db.getLastError());
+ } catch (e) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ sleep(1000);
+ if (!doneParallel()) {
+ throw e;
+ }
+ }
+ if (!doneParallel()) {
+ break;
+ }
+ print("indexing finished too soon, retrying...");
+ size *= 2;
+ assert(size < 5000000, "unable to run checks in parallel with index creation");
+ }
+
+ waitParallel();
+
+ if( dropDups == "true" ) {
+ assert.eq(size, t.find().toArray().length, "full query failed");
+ assert.eq(size, t.count(), "count failed");
+ }
+ else {
+ /* without dropDups, the collection may now hold more than size documents while the
+ unique index failed to build - which is valid. In that case we check the index isn't there.
+ */
+ if (t.count() != size)
+ assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
+ }
+
+}
+
+doTest( "false" );
+doTest( "true" );
diff --git a/jstests/sort5.js b/jstests/sort5.js
index a589355..b90256e 100644
--- a/jstests/sort5.js
+++ b/jstests/sort5.js
@@ -15,7 +15,7 @@ assert(t.validate().valid, "A valid");
// test sorting on compound key involving _id
-// assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , _id : -1 }).map( function(z){ return z.x; } ) , "B no index" );
-// t.ensureIndex({"y.b": 1, "_id": -1});
-// assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , _id : -1 }).map( function(z){ return z.x; } ) , "B index" );
-// assert(t.validate().valid, "B valid");
+assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , _id : -1 }).map( function(z){ return z.x; } ) , "B no index" );
+t.ensureIndex({"y.b": 1, "_id": -1});
+assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , _id : -1 }).map( function(z){ return z.x; } ) , "B index" );
+assert(t.validate().valid, "B valid");
diff --git a/jstests/sort6.js b/jstests/sort6.js
new file mode 100644
index 0000000..027ba7a
--- /dev/null
+++ b/jstests/sort6.js
@@ -0,0 +1,38 @@
+
+t = db.sort6;
+
+function get( x ){
+ return t.find().sort( { c : x } ).map( function(z){ return z._id; } );
+}
+
+// part 1
+t.drop();
+
+t.insert({_id:1,c:null})
+t.insert({_id:2,c:1})
+t.insert({_id:3,c:2})
+
+
+assert.eq( [3,2,1] , get( -1 ) , "A1" ) // SERVER-635
+assert.eq( [1,2,3] , get( 1 ) , "A2" )
+
+t.ensureIndex( { c : 1 } );
+
+assert.eq( [3,2,1] , get( -1 ) , "B1" )
+assert.eq( [1,2,3] , get( 1 ) , "B2" )
+
+
+// part 2
+t.drop();
+
+t.insert({_id:1})
+t.insert({_id:2,c:1})
+t.insert({_id:3,c:2})
+
+assert.eq( [3,2,1] , get( -1 ) , "C1" ) // SERVER-635
+assert.eq( [1,2,3] , get( 1 ) , "C2" )
+
+t.ensureIndex( { c : 1 } );
+
+assert.eq( [3,2,1] , get( -1 ) , "D1" )
+assert.eq( [1,2,3] , get( 1 ) , "X2" )
diff --git a/jstests/storefunc.js b/jstests/storefunc.js
index bae1090..4cf7e30 100644
--- a/jstests/storefunc.js
+++ b/jstests/storefunc.js
@@ -12,6 +12,8 @@ assert.eq( 0 , s.count() , "setup - D" );
s.save( { _id : "x" , value : "4" } );
assert.eq( 1 , s.count() , "setup - E" );
+assert.eq( 4 , s.findOne( { _id : "x" } ).value , "E2 " );
+
assert.eq( 4 , s.findOne().value , "setup - F" );
s.update( { _id : "x" } , { $set : { value : 5 } } );
assert.eq( 1 , s.count() , "setup - G" );
@@ -29,3 +31,12 @@ assert.eq( 6 , db.eval( "return x" ) , "exec - 2 " );
s.insert( { _id : "bar" , value : function( z ){ return 17 + z; } } );
assert.eq( 22 , db.eval( "return bar(5);" ) , "exec - 3 " );
+
+assert( s.getIndexKeys().length > 0 , "no indexes" );
+assert( s.getIndexKeys()[0]._id , "no _id index" );
+
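+// values saved in system.js become visible as global names inside db.eval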
+assert.eq( "undefined" , db.eval( function(){ return typeof(zzz); } ) , "C1" );
+s.save( { _id : "zzz" , value : 5 } )
+assert.eq( "number" , db.eval( function(){ return typeof(zzz); } ) , "C2" );
+s.remove( { _id : "zzz" } );
+assert.eq( "undefined" , db.eval( function(){ return typeof(zzz); } ) , "C3" );
diff --git a/jstests/testminmax.js b/jstests/testminmax.js
new file mode 100644
index 0000000..803f1b4
--- /dev/null
+++ b/jstests/testminmax.js
@@ -0,0 +1,14 @@
+t = db.minmaxtest;
+t.drop();
+t.insert({"_id" : "IBM.N|00001264779918428889", "DESCRIPTION" : { "n" : "IBMSTK2", "o" : "IBM STK", "s" : "changed" } });
+t.insert({ "_id" : "VOD.N|00001264779918433344", "COMPANYNAME" : { "n" : "Vodafone Group PLC 2", "o" : "Vodafone Group PLC", "s" : "changed" } });
+t.insert({ "_id" : "IBM.N|00001264779918437075", "DESCRIPTION" : { "n" : "IBMSTK3", "o" : "IBM STK2", "s" : "changed" } });
+t.insert({ "_id" : "VOD.N|00001264779918441426", "COMPANYNAME" : { "n" : "Vodafone Group PLC 3", "o" : "Vodafone Group PLC 2", "s" : "changed" } });
+
+// temp:
+printjson( t.find().min({"_id":"IBM.N|00000000000000000000"}).max({"_id":"IBM.N|99999999999999999999"}).toArray() );
+
+// this should be 2!! add assertion when fixed
+// http://jira.mongodb.org/browse/SERVER-675
+print( t.find().min({"_id":"IBM.N|00000000000000000000"}).max({"_id":"IBM.N|99999999999999999999"}).count() );
+
diff --git a/jstests/tool/csv1.js b/jstests/tool/csv1.js
index df8aa10..edf9dc2 100644
--- a/jstests/tool/csv1.js
+++ b/jstests/tool/csv1.js
@@ -4,26 +4,25 @@ t = new ToolTest( "csv1" )
c = t.startDB( "foo" );
-base = { a : 1 , b : "foo,bar" , c: 5 };
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': 6 };
assert.eq( 0 , c.count() , "setup1" );
c.insert( base );
delete base._id
assert.eq( 1 , c.count() , "setup2" );
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c" )
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d" )
c.drop()
assert.eq( 0 , c.count() , "after drop" )
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 2 , c.count() , "after restore 2" );
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d" );
+assert.soon( "2 == c.count()" , "restore 2" );
a = c.find().sort( { a : 1 } ).toArray();
delete a[0]._id
delete a[1]._id
-assert.eq( tojson( { a : "a" , b : "b" , c : "c" } ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d"} ) , tojson( a[1] ) , "csv parse 1" );
assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
c.drop()
diff --git a/jstests/tool/tool1.js b/jstests/tool/tool1.js
index 00e92e7..91fce80 100644
--- a/jstests/tool/tool1.js
+++ b/jstests/tool/tool1.js
@@ -2,8 +2,8 @@
baseName = "jstests_tool_tool1";
dbPath = "/data/db/" + baseName + "/";
-externalPath = "/data/db/" + baseName + "_external/"
-externalFile = externalPath + "export.json"
+externalPath = "/data/db/" + baseName + "_external/";
+externalFile = externalPath + "export.json";
function fileSize(){
var l = listFiles( externalPath );
diff --git a/jstests/type1.js b/jstests/type1.js
index 94385fa..518e367 100644
--- a/jstests/type1.js
+++ b/jstests/type1.js
@@ -21,3 +21,4 @@ assert.eq( 1 , t.find( { x : { $type : 1 } } ).count() , "B2" );
assert.eq( 3 , t.find( { x : { $type : 2 } } ).count() , "B3" );
assert.eq( 0 , t.find( { x : { $type : 3 } } ).count() , "B4" );
assert.eq( 1 , t.find( { x : { $type : 1 } } ).explain().nscanned , "B5" );
+assert.eq( 1 , t.find( { x : { $regex:"f", $type : 2 } } ).count() , "B3" ); \ No newline at end of file
diff --git a/jstests/unset2.js b/jstests/unset2.js
new file mode 100644
index 0000000..e1dc445
--- /dev/null
+++ b/jstests/unset2.js
@@ -0,0 +1,23 @@
+t = db.unset2;
+t.drop();
+
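+// $unset on an array element nulls it out rather than shrinking the array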
+t.save( {a:["a","b","c","d"]} );
+t.update( {}, {$unset:{"a.3":1}} );
+assert.eq( ["a","b","c",null], t.findOne().a );
+t.update( {}, {$unset:{"a.1":1}} );
+assert.eq( ["a",null,"c",null], t.findOne().a );
+t.update( {}, {$unset:{"a.0":1}} );
+assert.eq( [null,null,"c",null], t.findOne().a );
+t.update( {}, {$unset:{"a.4":1}} );
+assert.eq( [null,null,"c",null], t.findOne().a ); // no change
+
+t.drop();
+t.save( {a:["a","b","c","d","e"]} );
+t.update( {}, {$unset:{"a.2":1},$set:{"a.3":3,"a.4":4,"a.5":5}} );
+assert.eq( ["a","b",null,3,4,5], t.findOne().a );
+
+t.drop();
+t.save( {a:["a","b","c","d","e"]} );
+t.update( {}, {$unset:{"a.2":1},$set:{"a.2":4}} );
+assert( db.getLastError() );
+assert.eq( ["a","b","c","d","e"], t.findOne().a ); \ No newline at end of file
diff --git a/jstests/update6.js b/jstests/update6.js
index 1f42fe5..f547677 100644
--- a/jstests/update6.js
+++ b/jstests/update6.js
@@ -10,7 +10,7 @@ assert.eq( "c,d" , Object.keySet( t.findOne().b ).toString() , "B" );
t.update( { a : 1 } , { $inc : { "b.0e" : 1 } } );
assert.eq( 1 , t.findOne().b["0e"] , "C" );
-assert.eq( "0e,c,d" , Object.keySet( t.findOne().b ).toString() , "D" );
+assert.eq( "c,d,0e" , Object.keySet( t.findOne().b ).toString() , "D" );
// -----
diff --git a/jstests/update_addToSet.js b/jstests/update_addToSet.js
new file mode 100644
index 0000000..123bacb
--- /dev/null
+++ b/jstests/update_addToSet.js
@@ -0,0 +1,41 @@
+
+t = db.update_addToSet1;
+t.drop();
+
+o = { _id : 1 , a : [ 2 , 1 ] }
+t.insert( o );
+
+assert.eq( o , t.findOne() , "A1" );
+
+t.update( {} , { $addToSet : { a : 3 } } );
+o.a.push( 3 );
+assert.eq( o , t.findOne() , "A2" );
+
+t.update( {} , { $addToSet : { a : 3 } } );
+assert.eq( o , t.findOne() , "A3" );
+
+// SERVER-628
+t.update( {} , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } );
+o.a.push( 5 )
+o.a.push( 6 )
+assert.eq( o , t.findOne() , "B1" )
+
+t.drop()
+o = { _id : 1 , a : [ 3 , 5 , 6 ] }
+t.insert( o );
+t.update( {} , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } );
+assert.eq( o , t.findOne() , "B2" );
+
+t.drop();
+t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } , true );
+assert.eq( o , t.findOne() , "B3" );
+t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } , true );
+assert.eq( o , t.findOne() , "B4" );
+
+
+// SERVER-630
+t.drop();
+t.update( { _id : 2 } , { $addToSet : { a : 3 } } , true );
+assert.eq( 1 , t.count() , "C1" );
+assert.eq( { _id : 2 , a : [ 3 ] } , t.findOne() , "C2" );
+
diff --git a/jstests/update_arraymatch1.js b/jstests/update_arraymatch1.js
new file mode 100644
index 0000000..521271d
--- /dev/null
+++ b/jstests/update_arraymatch1.js
@@ -0,0 +1,16 @@
+
+t = db.update_arraymatch1
+t.drop();
+
+o = { _id : 1 , a : [ { x : 1 , y : 1 } , { x : 2 , y : 2 } , { x : 3 , y : 3 } ] }
+t.insert( o );
+assert.eq( o , t.findOne() , "A1" );
+
+q = { "a.x" : 2 }
+t.update( q , { $set : { b : 5 } } )
+o.b = 5
+assert.eq( o , t.findOne() , "A2" )
+
+t.update( { "a.x" : 2 } , { $inc : { "a.$.y" : 1 } } )
+o.a[1].y++;
+assert.eq( o , t.findOne() , "A3" );
diff --git a/jstests/update_arraymatch2.js b/jstests/update_arraymatch2.js
new file mode 100644
index 0000000..7eb810b
--- /dev/null
+++ b/jstests/update_arraymatch2.js
@@ -0,0 +1,16 @@
+t = db.tilde;
+t.drop();
+
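+// the positional $ operator updates the array element matched by the query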
+t.insert( { } );
+t.insert( { x : [1,2,3] } );
+t.insert( { x : 99 } );
+t.update( {x : 2}, { $inc : { "x.$" : 1 } } , false, true );
+assert( t.findOne({x:1}).x[1] == 3, "A1" );
+
+t.insert( { x : { y : [8,7,6] } } )
+t.update( {'x.y' : 7}, { $inc : { "x.y.$" : 1 } } , false, true )
+assert.eq( 8 , t.findOne({"x.y" : 8}).x.y[1] , "B1" );
+
+t.insert( { x : [90,91,92], y : ['a', 'b', 'c'] } );
+t.update( { x : 92} , { $set : { 'y.$' : 'z' } }, false, true );
+assert.eq( 'z', t.findOne({x:92}).y[2], "B2" );
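Taken together these files pin down the positional operator's contract: the query must match an array element, $ resolves to the index of the first matching element within each document, and, per the last case above, that index can then be applied to a different array field of the same document. Restated as a sketch (names are illustrative):

    t = db.pos_demo;  // hypothetical collection
    t.drop();
    t.insert( { x : [ 90, 91, 92 ], y : [ "a", "b", "c" ] } );
    // x:92 matches index 2, and that index carries over to y.$
    t.update( { x : 92 }, { $set : { "y.$" : "z" } } );
    assert.eq( "z", t.findOne().y[2] );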
diff --git a/jstests/update_arraymatch3.js b/jstests/update_arraymatch3.js
new file mode 100644
index 0000000..116ac6b
--- /dev/null
+++ b/jstests/update_arraymatch3.js
@@ -0,0 +1,17 @@
+
+t = db.update_arraymatch3;
+t.drop();
+
+o = { _id : 1 ,
+ title : "ABC",
+ comments : [ { "by" : "joe", "votes" : 3 },
+ { "by" : "jane", "votes" : 7 }
+ ]
+ }
+
+t.save( o );
+assert.eq( o , t.findOne() , "A1" );
+
+t.update( {'comments.by':'joe'}, {$inc:{'comments.$.votes':1}}, false, true )
+o.comments[0].votes++;
+assert.eq( o , t.findOne() , "A2" );
diff --git a/jstests/updatec.js b/jstests/updatec.js
new file mode 100644
index 0000000..12b1325
--- /dev/null
+++ b/jstests/updatec.js
@@ -0,0 +1,14 @@
+
+t = db.updatec;
+t.drop();
+
+t.update( { "_id" : 123 }, { $set : { "v" : { "i" : 123, "a":456 } }, $push : { "f" : 234} }, 1, 0 );
+t.update( { "_id" : 123 }, { $set : { "v" : { "i" : 123, "a":456 } }, $push : { "f" : 234} }, 1, 0 );
+
+assert.eq(
+ {
+ "_id" : 123,
+ "f" : [ 234, 234 ] ,
+ "v" : { "i" : 123, "a" : 456 }
+ } , t.findOne() );
+
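One detail worth noting in this test: the second update matches the document created by the first upsert, so $push appends a second 234 while $set merely rewrites v with the same value. If the intent were one entry per run, $addToSet is the natural swap; a hedged sketch reusing the same collection with a different _id:

    t.update( { _id : 124 }, { $set : { v : { i : 124 } }, $addToSet : { f : 234 } }, 1, 0 );
    t.update( { _id : 124 }, { $set : { v : { i : 124 } }, $addToSet : { f : 234 } }, 1, 0 );
    assert.eq( [ 234 ], t.findOne( { _id : 124 } ).f );  // appended only once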
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/mongo.xcodeproj/project.pbxproj b/mongo.xcodeproj/project.pbxproj
index f8dabce..f0042da 100644
--- a/mongo.xcodeproj/project.pbxproj
+++ b/mongo.xcodeproj/project.pbxproj
@@ -28,6 +28,16 @@
9303D1B710E1415C00294FAC /* mr.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = mr.cpp; sourceTree = "<group>"; };
9303D1B810E1415C00294FAC /* update.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = update.cpp; sourceTree = "<group>"; };
9303D1B910E1415C00294FAC /* update.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = update.h; sourceTree = "<group>"; };
+ 9307500C114EA14700272A70 /* indexbg1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = indexbg1.js; sourceTree = "<group>"; };
+ 9307500D114EA14700272A70 /* indexbg2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = indexbg2.js; sourceTree = "<group>"; };
+ 9307500E114EA14700272A70 /* ns1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = ns1.js; sourceTree = "<group>"; };
+ 93075092114EE1BA00272A70 /* dbhash.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = dbhash.js; sourceTree = "<group>"; };
+ 930750A7114EF4B100272A70 /* background.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = background.h; sourceTree = "<group>"; };
+ 930750A8114EFB9900272A70 /* update_addToSet.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = update_addToSet.js; sourceTree = "<group>"; };
+ 930750A9114EFB9900272A70 /* update_arraymatch1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = update_arraymatch1.js; sourceTree = "<group>"; };
+ 930750AA114EFB9900272A70 /* update_arraymatch2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = update_arraymatch2.js; sourceTree = "<group>"; };
+ 930750AB114EFB9900272A70 /* updateb.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = updateb.js; sourceTree = "<group>"; };
+ 930750AC114EFB9900272A70 /* updatec.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = updatec.js; sourceTree = "<group>"; };
930B844D0FA10D1C00F22B4B /* optime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optime.h; sourceTree = "<group>"; };
931184DC0F83C95800A6DC44 /* message_server_port.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = message_server_port.cpp; sourceTree = "<group>"; };
931186FB0F8535FF00A6DC44 /* bridge.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = bridge.cpp; sourceTree = "<group>"; };
@@ -80,8 +90,6 @@
934223A00EF16DB400608550 /* dbeval.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dbeval.cpp; sourceTree = "<group>"; };
934223A10EF16DB400608550 /* dbhelpers.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dbhelpers.cpp; sourceTree = "<group>"; };
934223A20EF16DB400608550 /* dbhelpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dbhelpers.h; sourceTree = "<group>"; };
- 934223A30EF16DB400608550 /* dbinfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dbinfo.cpp; sourceTree = "<group>"; };
- 934223A40EF16DB400608550 /* dbinfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dbinfo.h; sourceTree = "<group>"; };
934223A50EF16DB400608550 /* dbmessage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dbmessage.h; sourceTree = "<group>"; };
934223A60EF16DB400608550 /* dbwebserver.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dbwebserver.cpp; sourceTree = "<group>"; };
934223A70EF16DB400608550 /* instance.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = instance.cpp; sourceTree = "<group>"; };
@@ -123,7 +131,6 @@
934BEBA410DFFA9600178102 /* array1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = array1.js; sourceTree = "<group>"; };
934BEBA510DFFA9600178102 /* array3.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = array3.js; sourceTree = "<group>"; };
934BEBA610DFFA9600178102 /* auth1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = auth1.js; sourceTree = "<group>"; };
- 934BEBA710DFFA9600178102 /* auth2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = auth2.js; sourceTree = "<group>"; };
934BEBA810DFFA9600178102 /* autoid.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = autoid.js; sourceTree = "<group>"; };
934BEBA910DFFA9600178102 /* basic1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = basic1.js; sourceTree = "<group>"; };
934BEBAA10DFFA9600178102 /* basic2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = basic2.js; sourceTree = "<group>"; };
@@ -138,7 +145,6 @@
934BEBB310DFFA9600178102 /* basicb.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = basicb.js; sourceTree = "<group>"; };
934BEBB410DFFA9600178102 /* capped.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = capped.js; sourceTree = "<group>"; };
934BEBB510DFFA9600178102 /* capped1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = capped1.js; sourceTree = "<group>"; };
- 934BEBB610DFFA9600178102 /* btree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = btree.h; sourceTree = "<group>"; };
934BEBB710DFFA9600178102 /* capped3.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = capped3.js; sourceTree = "<group>"; };
934BEBB810DFFA9600178102 /* capped4.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = capped4.js; sourceTree = "<group>"; };
934BEBBA10DFFA9600178102 /* clonecollection.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = clonecollection.js; sourceTree = "<group>"; };
@@ -403,10 +409,8 @@
936B895A0F4C899400934AF2 /* md5.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = md5.c; sourceTree = "<group>"; };
936B895B0F4C899400934AF2 /* md5.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = "<group>"; };
936B895C0F4C899400934AF2 /* md5.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = md5.hpp; sourceTree = "<group>"; };
- 936B895D0F4C899400934AF2 /* md5main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = md5main.c; sourceTree = "<group>"; };
936B895E0F4C899400934AF2 /* message.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = message.cpp; sourceTree = "<group>"; };
936B895F0F4C899400934AF2 /* message.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = message.h; sourceTree = "<group>"; };
- 936B89600F4C899400934AF2 /* top.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = top.h; sourceTree = "<group>"; };
937CACE90F27BF4900C57AA6 /* socktests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = socktests.cpp; sourceTree = "<group>"; };
937D0E340F28CB070071FFA9 /* repltests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = repltests.cpp; sourceTree = "<group>"; };
937D14AB0F2A225F0071FFA9 /* nonce.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nonce.h; sourceTree = "<group>"; };
@@ -420,6 +424,12 @@
938A7A480F54873600FB7A07 /* reccache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reccache.h; sourceTree = "<group>"; };
938A7A490F54873600FB7A07 /* reci.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reci.h; sourceTree = "<group>"; };
938A7A4A0F54873600FB7A07 /* recstore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recstore.h; sourceTree = "<group>"; };
+ 938E5EB3110E1ED700A8760A /* repair.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = repair.js; sourceTree = "<group>"; };
+ 938E60AB110F721900A8760A /* perdbpath.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = perdbpath.js; sourceTree = "<group>"; };
+ 938E60AC110F734800A8760A /* directoryperdb.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = directoryperdb.js; sourceTree = "<group>"; };
+ 938E639B110FC66900A8760A /* auth1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = auth1.js; sourceTree = "<group>"; };
+ 938E63D0110FC96B00A8760A /* auth2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = auth2.js; sourceTree = "<group>"; };
+ 9391C9DD1120F9D300292B19 /* newcollection.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = newcollection.js; sourceTree = "<group>"; };
93A13A210F4620A500AF1B0D /* commands.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = commands.cpp; sourceTree = "<group>"; };
93A13A230F4620A500AF1B0D /* config.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = config.cpp; sourceTree = "<group>"; };
93A13A240F4620A500AF1B0D /* config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = config.h; sourceTree = "<group>"; };
@@ -433,7 +443,6 @@
93A13A330F4620E500AF1B0D /* dump.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = dump.cpp; sourceTree = "<group>"; };
93A13A350F4620E500AF1B0D /* export.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = export.cpp; sourceTree = "<group>"; };
93A13A370F4620E500AF1B0D /* files.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = files.cpp; sourceTree = "<group>"; };
- 93A13A390F4620E500AF1B0D /* importJSON.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = importJSON.cpp; sourceTree = "<group>"; };
93A13A3B0F4620E500AF1B0D /* restore.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = restore.cpp; sourceTree = "<group>"; };
93A13A3D0F4620E500AF1B0D /* sniffer.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = sniffer.cpp; sourceTree = "<group>"; };
93A13A3F0F4620E500AF1B0D /* Tool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Tool.cpp; sourceTree = "<group>"; };
@@ -460,6 +469,15 @@
93B4A81B0F1C01D8000C862C /* lasterror.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = lasterror.cpp; sourceTree = "<group>"; };
93B4A8290F1C024C000C862C /* cursor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = cursor.cpp; sourceTree = "<group>"; };
93B4A82A0F1C0256000C862C /* pdfiletests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = pdfiletests.cpp; sourceTree = "<group>"; };
+ 93B9F5A7112B12440066ECD2 /* slavefromsnapshot.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = slavefromsnapshot.js; sourceTree = "<group>"; };
+ 93B9F671112B3AD40066ECD2 /* copyauth.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; name = copyauth.js; path = auth/copyauth.js; sourceTree = "<group>"; };
+ 93B9F76A112B6C020066ECD2 /* snapshot1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = snapshot1.js; sourceTree = "<group>"; };
+ 93B9F76B112B6C1D0066ECD2 /* snapshot2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = snapshot2.js; sourceTree = "<group>"; };
+ 93B9F7E6112B98710066ECD2 /* snapshot3.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = snapshot3.js; sourceTree = "<group>"; };
+ 93B9F91A112C7F200066ECD2 /* set4.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = set4.js; sourceTree = "<group>"; };
+ 93B9F91B112C7F200066ECD2 /* set5.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = set5.js; sourceTree = "<group>"; };
+ 93B9F91C112C7F200066ECD2 /* set6.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = set6.js; sourceTree = "<group>"; };
+ 93B9FA36112CAC3C0066ECD2 /* shellkillop.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = shellkillop.js; sourceTree = "<group>"; };
93BC2AE10FB87662006BC285 /* cursortests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = cursortests.cpp; sourceTree = "<group>"; };
93BC2AE20FB87662006BC285 /* jstests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = jstests.cpp; sourceTree = "<group>"; };
93BCE15610F25DFE00FA139B /* arrayfind1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = arrayfind1.js; sourceTree = "<group>"; };
@@ -481,9 +499,31 @@
93BCE4B510F3C8DB00FA139B /* allops.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = allops.js; sourceTree = "<group>"; };
93BCE5A510F3F8E900FA139B /* manyclients.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = manyclients.js; sourceTree = "<group>"; };
93BCE5A610F3FB5200FA139B /* basicPlus.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = basicPlus.js; sourceTree = "<group>"; };
+ 93BDCE401157E7280097FE87 /* repl10.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = repl10.js; sourceTree = "<group>"; };
+ 93BDCE411157E7280097FE87 /* repl11.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = repl11.js; sourceTree = "<group>"; };
+ 93BDCE92115817210097FE87 /* pair7.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = pair7.js; sourceTree = "<group>"; };
+ 93BDCEB9115830CB0097FE87 /* repl.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = repl.js; sourceTree = "<group>"; };
+ 93BFA0E311330A8C0045D084 /* not2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = not2.js; sourceTree = "<group>"; };
93C38E940FA66622007D6E4A /* basictests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = basictests.cpp; sourceTree = "<group>"; };
+ 93C8E6FE11457D9000F28017 /* master1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = master1.js; sourceTree = "<group>"; };
+ 93C8E81C1145BCCA00F28017 /* regex7.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = regex7.js; sourceTree = "<group>"; };
+ 93C8E9DF1146D39700F28017 /* arrayfind2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = arrayfind2.js; sourceTree = "<group>"; };
+ 93C8EB4D114721D000F28017 /* copydb2.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = copydb2.js; sourceTree = "<group>"; };
+ 93C8ECE61147820C00F28017 /* counters.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = counters.cpp; sourceTree = "<group>"; };
+ 93C8ECE71147820C00F28017 /* counters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = counters.h; sourceTree = "<group>"; };
+ 93C8ECE91147820C00F28017 /* snapshots.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = snapshots.cpp; sourceTree = "<group>"; };
+ 93C8ECEA1147820C00F28017 /* snapshots.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = snapshots.h; sourceTree = "<group>"; };
+ 93C8ECEC1147820C00F28017 /* top.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = top.cpp; sourceTree = "<group>"; };
+ 93C8ECED1147820C00F28017 /* top.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = top.h; sourceTree = "<group>"; };
+ 93C8ED001147824B00F28017 /* thread_pool.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = thread_pool.cpp; sourceTree = "<group>"; };
+ 93C8ED041147828F00F28017 /* index.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = index.cpp; sourceTree = "<group>"; };
+ 93CC40C2113C407A00734218 /* insert1.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = insert1.js; sourceTree = "<group>"; };
+ 93CC441A113DE6BA00734218 /* indexg.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = indexg.js; sourceTree = "<group>"; };
+ 93CC4484113E602400734218 /* in3.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = in3.js; sourceTree = "<group>"; };
93D0C1520EF1D377005253B7 /* jsobjtests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = jsobjtests.cpp; sourceTree = "<group>"; };
93D0C1FB0EF1E267005253B7 /* namespacetests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = namespacetests.cpp; sourceTree = "<group>"; };
+ 93D5A8921117A1380052C931 /* regex6.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = regex6.js; sourceTree = "<group>"; };
+ 93D5AEC5111905B80010C810 /* import.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = import.cpp; sourceTree = "<group>"; };
93D6BBF70F265E1100FE5722 /* matchertests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = matchertests.cpp; sourceTree = "<group>"; };
93D6BC9B0F266FC300FE5722 /* querytests.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = querytests.cpp; sourceTree = "<group>"; };
93DCDBD30F9515AF005349BC /* file_allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file_allocator.h; sourceTree = "<group>"; };
@@ -567,6 +607,16 @@
path = modules;
sourceTree = "<group>";
};
+ 9307500B114EA14700272A70 /* slow */ = {
+ isa = PBXGroup;
+ children = (
+ 9307500C114EA14700272A70 /* indexbg1.js */,
+ 9307500D114EA14700272A70 /* indexbg2.js */,
+ 9307500E114EA14700272A70 /* ns1.js */,
+ );
+ path = slow;
+ sourceTree = "<group>";
+ };
933A4D120F55A68600145C4B /* examples */ = {
isa = PBXGroup;
children = (
@@ -639,6 +689,9 @@
9342238F0EF16DB400608550 /* db */ = {
isa = PBXGroup;
children = (
+ 930750A7114EF4B100272A70 /* background.h */,
+ 93C8ED041147828F00F28017 /* index.cpp */,
+ 93C8ECE51147820C00F28017 /* stats */,
9303D1AB10E1415C00294FAC /* client.cpp */,
9303D1AC10E1415C00294FAC /* client.h */,
9303D1AD10E1415C00294FAC /* cmdline.h */,
@@ -690,8 +743,6 @@
934223A00EF16DB400608550 /* dbeval.cpp */,
934223A10EF16DB400608550 /* dbhelpers.cpp */,
934223A20EF16DB400608550 /* dbhelpers.h */,
- 934223A30EF16DB400608550 /* dbinfo.cpp */,
- 934223A40EF16DB400608550 /* dbinfo.h */,
934223A50EF16DB400608550 /* dbmessage.h */,
934223A60EF16DB400608550 /* dbwebserver.cpp */,
934223A70EF16DB400608550 /* instance.cpp */,
@@ -728,6 +779,27 @@
934BEB9A10DFFA9600178102 /* jstests */ = {
isa = PBXGroup;
children = (
+ 930750A8114EFB9900272A70 /* update_addToSet.js */,
+ 930750A9114EFB9900272A70 /* update_arraymatch1.js */,
+ 930750AA114EFB9900272A70 /* update_arraymatch2.js */,
+ 930750AB114EFB9900272A70 /* updateb.js */,
+ 930750AC114EFB9900272A70 /* updatec.js */,
+ 93075092114EE1BA00272A70 /* dbhash.js */,
+ 9307500B114EA14700272A70 /* slow */,
+ 93C8EB4D114721D000F28017 /* copydb2.js */,
+ 93C8E9DF1146D39700F28017 /* arrayfind2.js */,
+ 93C8E81C1145BCCA00F28017 /* regex7.js */,
+ 93CC4484113E602400734218 /* in3.js */,
+ 93CC441A113DE6BA00734218 /* indexg.js */,
+ 93CC40C2113C407A00734218 /* insert1.js */,
+ 93BFA0E311330A8C0045D084 /* not2.js */,
+ 93B9FA36112CAC3C0066ECD2 /* shellkillop.js */,
+ 93B9F91A112C7F200066ECD2 /* set4.js */,
+ 93B9F91B112C7F200066ECD2 /* set5.js */,
+ 93B9F91C112C7F200066ECD2 /* set6.js */,
+ 93B9F671112B3AD40066ECD2 /* copyauth.js */,
+ 93D5A8921117A1380052C931 /* regex6.js */,
+ 938E639A110FC66900A8760A /* auth */,
93BCE41810F3AF1B00FA139B /* capped2.js */,
93BCE1D310F26CDA00FA139B /* fsync2.js */,
93BCE15610F25DFE00FA139B /* arrayfind1.js */,
@@ -750,7 +822,6 @@
934BEBA410DFFA9600178102 /* array1.js */,
934BEBA510DFFA9600178102 /* array3.js */,
934BEBA610DFFA9600178102 /* auth1.js */,
- 934BEBA710DFFA9600178102 /* auth2.js */,
934BEBA810DFFA9600178102 /* autoid.js */,
934BEBA910DFFA9600178102 /* basic1.js */,
934BEBAA10DFFA9600178102 /* basic2.js */,
@@ -765,7 +836,6 @@
934BEBB310DFFA9600178102 /* basicb.js */,
934BEBB410DFFA9600178102 /* capped.js */,
934BEBB510DFFA9600178102 /* capped1.js */,
- 934BEBB610DFFA9600178102 /* btree.h */,
934BEBB710DFFA9600178102 /* capped3.js */,
934BEBB810DFFA9600178102 /* capped4.js */,
934BEBB910DFFA9600178102 /* clone */,
@@ -958,6 +1028,11 @@
934BEBCD10DFFA9600178102 /* disk */ = {
isa = PBXGroup;
children = (
+ 93B9F5A7112B12440066ECD2 /* slavefromsnapshot.js */,
+ 9391C9DD1120F9D300292B19 /* newcollection.js */,
+ 938E60AC110F734800A8760A /* directoryperdb.js */,
+ 938E60AB110F721900A8760A /* perdbpath.js */,
+ 938E5EB3110E1ED700A8760A /* repair.js */,
935C941B1106709800439EB1 /* preallocate.js */,
934BEBCE10DFFA9600178102 /* dbNoCreate.js */,
934BEBCF10DFFA9600178102 /* diskfull.js */,
@@ -987,6 +1062,13 @@
934BEC5010DFFA9600178102 /* repl */ = {
isa = PBXGroup;
children = (
+ 93BDCE92115817210097FE87 /* pair7.js */,
+ 93BDCE401157E7280097FE87 /* repl10.js */,
+ 93BDCE411157E7280097FE87 /* repl11.js */,
+ 93C8E6FE11457D9000F28017 /* master1.js */,
+ 93B9F7E6112B98710066ECD2 /* snapshot3.js */,
+ 93B9F76B112B6C1D0066ECD2 /* snapshot2.js */,
+ 93B9F76A112B6C020066ECD2 /* snapshot1.js */,
934BEC5110DFFA9600178102 /* basic1.js */,
934BEC5210DFFA9600178102 /* pair1.js */,
934BEC5310DFFA9600178102 /* pair2.js */,
@@ -1053,6 +1135,7 @@
934DD87B0EFAD23B00459CC1 /* util */ = {
isa = PBXGroup;
children = (
+ 93C8ED001147824B00F28017 /* thread_pool.cpp */,
934BEE8C10E050A500178102 /* allocator.h */,
934BEE8D10E050A500178102 /* assert_util.cpp */,
934BEE8E10E050A500178102 /* assert_util.h */,
@@ -1081,10 +1164,8 @@
936B895A0F4C899400934AF2 /* md5.c */,
936B895B0F4C899400934AF2 /* md5.h */,
936B895C0F4C899400934AF2 /* md5.hpp */,
- 936B895D0F4C899400934AF2 /* md5main.c */,
936B895E0F4C899400934AF2 /* message.cpp */,
936B895F0F4C899400934AF2 /* message.h */,
- 936B89600F4C899400934AF2 /* top.h */,
934DD87C0EFAD23B00459CC1 /* background.cpp */,
934DD87D0EFAD23B00459CC1 /* background.h */,
934DD87F0EFAD23B00459CC1 /* builder.h */,
@@ -1104,6 +1185,15 @@
path = util;
sourceTree = "<group>";
};
+ 938E639A110FC66900A8760A /* auth */ = {
+ isa = PBXGroup;
+ children = (
+ 938E63D0110FC96B00A8760A /* auth2.js */,
+ 938E639B110FC66900A8760A /* auth1.js */,
+ );
+ path = auth;
+ sourceTree = "<group>";
+ };
93A13A200F4620A500AF1B0D /* s */ = {
isa = PBXGroup;
children = (
@@ -1133,11 +1223,11 @@
93A13A320F4620E500AF1B0D /* tools */ = {
isa = PBXGroup;
children = (
+ 93D5AEC5111905B80010C810 /* import.cpp */,
931186FB0F8535FF00A6DC44 /* bridge.cpp */,
93A13A330F4620E500AF1B0D /* dump.cpp */,
93A13A350F4620E500AF1B0D /* export.cpp */,
93A13A370F4620E500AF1B0D /* files.cpp */,
- 93A13A390F4620E500AF1B0D /* importJSON.cpp */,
93A13A3B0F4620E500AF1B0D /* restore.cpp */,
93A13A3D0F4620E500AF1B0D /* sniffer.cpp */,
93A13A3F0F4620E500AF1B0D /* Tool.cpp */,
@@ -1168,9 +1258,23 @@
path = scripting;
sourceTree = "<group>";
};
+ 93C8ECE51147820C00F28017 /* stats */ = {
+ isa = PBXGroup;
+ children = (
+ 93C8ECE61147820C00F28017 /* counters.cpp */,
+ 93C8ECE71147820C00F28017 /* counters.h */,
+ 93C8ECE91147820C00F28017 /* snapshots.cpp */,
+ 93C8ECEA1147820C00F28017 /* snapshots.h */,
+ 93C8ECEC1147820C00F28017 /* top.cpp */,
+ 93C8ECED1147820C00F28017 /* top.h */,
+ );
+ path = stats;
+ sourceTree = "<group>";
+ };
93F0956F10E165E50053380C /* parallel */ = {
isa = PBXGroup;
children = (
+ 93BDCEB9115830CB0097FE87 /* repl.js */,
93BCE5A610F3FB5200FA139B /* basicPlus.js */,
93BCE5A510F3F8E900FA139B /* manyclients.js */,
93BCE4B510F3C8DB00FA139B /* allops.js */,
diff --git a/msvc/msvc_scripting.cpp b/msvc/msvc_scripting.cpp
index 0b4e21f..b79635d 100644
--- a/msvc/msvc_scripting.cpp
+++ b/msvc/msvc_scripting.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "stdafx.h"
diff --git a/rpm/init.d-mongod b/rpm/init.d-mongod
index c099ef9..12068c8 100644
--- a/rpm/init.d-mongod
+++ b/rpm/init.d-mongod
@@ -12,30 +12,37 @@
# things from mongod.conf get there by mongod reading it
+
+
OPTIONS=" -f /etc/mongod.conf"
+SYSCONFIG="/etc/sysconfig/mongod"
mongod=${MONGOD-/usr/bin/mongod}
-pidfile=${PIDFILE-/var/run/mongod.pid}
-lockfile=${LOCKFILE-/var/lock/subsys/mongod}
+
+MONGO_USER=mongod
+MONGO_GROUP=mongod
+
+. "$SYSCONFIG" || true
start()
{
echo -n $"Starting mongod: "
- #daemon --pidfile=${pidfile} $mongod $OPTIONS > /var/log/mongod
- $mongod $OPTIONS > /var/log/mongod 2>&1 &
+ daemon --user "$MONGO_USER" $mongod $OPTIONS
RETVAL=$?
- [ $RETVAL = 0 ] && touch ${lockfile}
- echo OK
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/mongod && success
}
stop()
{
echo -n $"Stopping mongod: "
- #killproc -p ${pidfile} -d 10 $mongod
- #RETVAL=$?
- killall mongod > /dev/null 2>&1
- #[ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
- echo OK
+ killproc -p /var/lib/mongo/mongod.lock -t30 -TERM /usr/bin/mongod
+ RETVAL=$?
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mongod && success
+}
+
+restart () {
+ stop
+ start
}
ulimit -n 12000
@@ -48,15 +55,17 @@ case "$1" in
stop)
stop
;;
- restart)
- stop
- start
+ restart|reload|force-reload)
+ restart
+ ;;
+ condrestart)
+	[ -f /var/lock/subsys/mongod ] && restart || :
+ ;;
+ status)
+ status $mongod
;;
-# status)
-# status -p ${pidfile} $mongod
-# ;;
*)
- echo $"Usage: $0 {start|stop|restart}"
+ echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
RETVAL=1
esac
diff --git a/rpm/mongo.mdv.spec b/rpm/mongo.mdv.spec
new file mode 100644
index 0000000..cab3fed
--- /dev/null
+++ b/rpm/mongo.mdv.spec
@@ -0,0 +1,143 @@
+%define name mongodb
+%define version 1.3.4
+%define release %mkrel 1
+
+Name: %{name}
+Version: %{version}
+Release: %{release}
+Summary: MongoDB client shell and tools
+License: AGPL 3.0
+URL: http://www.mongodb.org
+Group: Databases
+
+Source0: http://downloads.mongodb.org/src/%{name}-src-r%{version}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+BuildRequires: js-devel, readline-devel, boost-devel, pcre-devel
+BuildRequires: gcc-c++, scons
+
+%description
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+It features dynamic profileable queries, full indexing, replication
+and fail-over support, efficient storage of large binary data objects,
+and auto-sharding.
+
+This package provides the mongo shell, import/export tools, and other
+client utilities.
+
+%package server
+Summary: MongoDB server, sharding server, and support scripts
+Group: Databases
+Requires: mongodb
+
+%description server
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo server software, mongo sharding server
+software, default configuration files, and init.d scripts.
+
+%package devel
+Summary: Headers and libraries for mongo development.
+Group: Databases
+
+%description devel
+Mongo (from "huMONGOus") is a schema-free document-oriented database.
+
+This package provides the mongo static library and header files needed
+to develop mongo client software.
+
+%prep
+%setup -n %{name}-src-r%{version}
+
+%build
+scons --prefix=$RPM_BUILD_ROOT/usr all
+# XXX really should have shared library here
+
+%install
+scons --prefix=$RPM_BUILD_ROOT%{_usr} install
+mkdir -p $RPM_BUILD_ROOT%{_mandir}/man1
+cp debian/*.1 $RPM_BUILD_ROOT%{_mandir}/man1/
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d
+cp rpm/init.d-mongod $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d/mongod
+chmod a+x $RPM_BUILD_ROOT%{_sysconfdir}/rc.d/init.d/mongod
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}
+cp rpm/mongod.conf $RPM_BUILD_ROOT%{_sysconfdir}/mongod.conf
+mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig
+cp rpm/mongod.sysconfig $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/mongod
+mkdir -p $RPM_BUILD_ROOT%{_var}/lib/mongo
+mkdir -p $RPM_BUILD_ROOT%{_var}/log/mongo
+touch $RPM_BUILD_ROOT%{_var}/log/mongo/mongod.log
+
+%clean
+scons -c
+rm -rf $RPM_BUILD_ROOT
+
+%pre server
+%{_sbindir}/useradd -M -r -U -d %{_var}/lib/mongo -s /bin/false \
+ -c mongod mongod > /dev/null 2>&1
+
+%post server
+if test $1 = 1
+then
+ /sbin/chkconfig --add mongod
+fi
+
+%preun server
+if test $1 = 0
+then
+ /sbin/chkconfig --del mongod
+fi
+
+%postun server
+if test $1 -ge 1
+then
+ /sbin/service mongod stop >/dev/null 2>&1 || :
+fi
+
+%files
+%defattr(-,root,root,-)
+%doc README GNU-AGPL-3.0.txt
+
+%{_bindir}/mongo
+%{_bindir}/mongodump
+%{_bindir}/mongoexport
+%{_bindir}/mongofiles
+%{_bindir}/mongoimport
+%{_bindir}/mongorestore
+%{_bindir}/mongostat
+
+%{_mandir}/man1/mongo.1*
+%{_mandir}/man1/mongod.1*
+%{_mandir}/man1/mongodump.1*
+%{_mandir}/man1/mongoexport.1*
+%{_mandir}/man1/mongofiles.1*
+%{_mandir}/man1/mongoimport.1*
+%{_mandir}/man1/mongosniff.1*
+%{_mandir}/man1/mongostat.1*
+%{_mandir}/man1/mongorestore.1*
+
+%files server
+%defattr(-,root,root,-)
+%config(noreplace) %{_sysconfdir}/mongod.conf
+%{_bindir}/mongod
+%{_bindir}/mongos
+%{_mandir}/man1/mongos.1*
+%{_sysconfdir}/rc.d/init.d/mongod
+%{_sysconfdir}/sysconfig/mongod
+%attr(0755,mongod,mongod) %dir %{_var}/lib/mongo
+%attr(0755,mongod,mongod) %dir %{_var}/log/mongo
+%attr(0640,mongod,mongod) %config(noreplace) %verify(not md5 size mtime) %{_var}/log/mongo/mongod.log
+
+%files devel
+%{_includedir}/mongo
+%{_libdir}/libmongoclient.a
+#%{_libdir}/libmongotestfiles.a
+
+%changelog
+* Sun Mar 21 2010 Ludovic Bellière <xrogaan@gmail.com>
+- Update mongo.spec for mandriva packaging
+
+* Thu Jan 28 2010 Richard M Kreuter <richard@10gen.com>
+- Minor fixes.
+
+* Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> -
+- Wrote mongo.spec. \ No newline at end of file
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index f58b61c..eac1cff 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.3.1
+Version: 1.4.0
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
@@ -23,6 +23,7 @@ client utilities.
%package server
Summary: mongo server, sharding server, and support scripts
Group: Applications/Databases
+Requires: mongo
%description server
Mongo (from "huMONGOus") is a schema-free document-oriented database.
@@ -56,17 +57,19 @@ cp rpm/init.d-mongod $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
chmod a+x $RPM_BUILD_ROOT/etc/rc.d/init.d/mongod
mkdir -p $RPM_BUILD_ROOT/etc
cp rpm/mongod.conf $RPM_BUILD_ROOT/etc/mongod.conf
+mkdir -p $RPM_BUILD_ROOT/etc/sysconfig
+cp rpm/mongod.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/mongod
mkdir -p $RPM_BUILD_ROOT/var/lib/mongo
-mkdir -p $RPM_BUILD_ROOT/var/log
-touch $RPM_BUILD_ROOT/var/log/mongo
+mkdir -p $RPM_BUILD_ROOT/var/log/mongo
+touch $RPM_BUILD_ROOT/var/log/mongo/mongod.log
%clean
scons -c
rm -rf $RPM_BUILD_ROOT
%pre server
-#/usr/sbin/useradd -M -o -r -d /var/mongo -s /bin/bash \
-# -c "mongod" mongod > /dev/null 2>&1 || :
+/usr/sbin/useradd -M -r -U -d /var/lib/mongo -s /bin/false \
+ -c mongod mongod > /dev/null 2>&1
%post server
if test $1 = 1
@@ -96,12 +99,16 @@ fi
%{_bindir}/mongofiles
%{_bindir}/mongoimport
%{_bindir}/mongorestore
+%{_bindir}/mongostat
%{_mandir}/man1/mongo.1*
+%{_mandir}/man1/mongod.1*
%{_mandir}/man1/mongodump.1*
%{_mandir}/man1/mongoexport.1*
%{_mandir}/man1/mongofiles.1*
%{_mandir}/man1/mongoimport.1*
+%{_mandir}/man1/mongosniff.1*
+%{_mandir}/man1/mongostat.1*
%{_mandir}/man1/mongorestore.1*
%files server
@@ -114,8 +121,9 @@ fi
/etc/rc.d/init.d/mongod
/etc/sysconfig/mongod
#/etc/rc.d/init.d/mongos
-%attr(0755,root,root) %dir /var/mongo
-%attr(0640,root,root) %config(noreplace) %verify(not md5 size mtime) /var/log/mongo
+%attr(0755,mongod,mongod) %dir /var/lib/mongo
+%attr(0755,mongod,mongod) %dir /var/log/mongo
+%attr(0640,mongod,mongod) %config(noreplace) %verify(not md5 size mtime) /var/log/mongo/mongod.log
%files devel
/usr/include/mongo
@@ -123,6 +131,8 @@ fi
#%{_libdir}/libmongotestfiles.a
%changelog
+* Thu Jan 28 2010 Richard M Kreuter <richard@10gen.com>
+- Minor fixes.
+
* Sat Oct 24 2009 Joe Miklojcik <jmiklojcik@shopwiki.com> -
- Wrote mongo.spec.
-
diff --git a/rpm/mongod.conf b/rpm/mongod.conf
index 0c87186..b8897b2 100644
--- a/rpm/mongod.conf
+++ b/rpm/mongod.conf
@@ -1,7 +1,10 @@
# mongo.conf
#where to log
-logpath=/var/log/mongod
+logpath=/var/log/mongo/mongod.log
+
+# fork and run in background
+fork = true
#port = 27017
diff --git a/rpm/mongod.sysconfig b/rpm/mongod.sysconfig
new file mode 100644
index 0000000..5dbfce1
--- /dev/null
+++ b/rpm/mongod.sysconfig
@@ -0,0 +1 @@
+# TODO: add relevant configuration stuff here.
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 47c13e8..73d17d9 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -28,7 +28,7 @@ namespace mongo {
// ------- Shard --------
- long Chunk::MaxChunkSize = 1024 * 1204 * 50;
+ int Chunk::MaxChunkSize = 1024 * 1204 * 200;
Chunk::Chunk( ChunkManager * manager ) : _manager( manager ){
_modified = false;
@@ -41,13 +41,13 @@ namespace mongo {
_markModified();
}
- bool Chunk::contains( const BSONObj& obj ){
+ bool Chunk::contains( const BSONObj& obj ) const{
return
_manager->getShardKey().compare( getMin() , obj ) <= 0 &&
_manager->getShardKey().compare( obj , getMax() ) < 0;
}
- BSONObj Chunk::pickSplitPoint(){
+ BSONObj Chunk::pickSplitPoint() const{
int sort = 0;
if ( _manager->getShardKey().globalMin().woCompare( getMin() ) == 0 ){
@@ -77,7 +77,7 @@ namespace mongo {
}
BSONObj end = conn->findOne( _ns , q );
conn.done();
-
+
if ( ! end.isEmpty() )
return _manager->getShardKey().extractKey( end );
}
@@ -93,9 +93,25 @@ namespace mongo {
ss << "medianKey command failed: " << result;
uassert( 10164 , ss.str() , 0 );
}
+
+ BSONObj median = result.getObjectField( "median" );
+ if (median == getMin()){
+ //TODO compound support
+ BSONElement key = getMin().firstElement();
+ BSONObjBuilder b;
+ b.appendAs("$gt", key);
+
+ Query q = QUERY(key.fieldName() << b.obj());
+ q.sort(_manager->getShardKey().key());
+
+ median = conn->findOne(_ns, q);
+ median = _manager->getShardKey().extractKey( median );
+ PRINT(median);
+ }
+
conn.done();
- return result.getObjectField( "median" ).getOwned();
+ return median.getOwned();
}
Chunk * Chunk::split(){
@@ -109,6 +125,8 @@ namespace mongo {
<< "\t self : " << toString() << endl;
uassert( 10166 , "locking namespace on server failed" , lockNamespaceOnServer( getShard() , _ns ) );
+ uassert( 13003 , "can't split chunk. does it have only one distinct value?" ,
+ !m.isEmpty() && _min.woCompare(m) && _max.woCompare(m));
Chunk * s = new Chunk( _manager );
s->_ns = _ns;
@@ -216,10 +234,13 @@ namespace mongo {
if ( _dataWritten < MaxChunkSize / 5 )
return false;
+
+ log(1) << "\t want to split chunk : " << this << endl;
_dataWritten = 0;
- if ( _min.woCompare( _max ) == 0 ){
+ BSONObj split_point = pickSplitPoint();
+ if ( split_point.isEmpty() || _min == split_point || _max == split_point) {
log() << "SHARD PROBLEM** shard is too big, but can't split: " << toString() << endl;
return false;
}
@@ -229,7 +250,7 @@ namespace mongo {
return false;
log() << "autosplitting " << _ns << " size: " << size << " shard: " << toString() << endl;
- Chunk * newShard = split();
+ Chunk * newShard = split(split_point);
moveIfShould( newShard );
@@ -268,7 +289,7 @@ namespace mongo {
return true;
}
- long Chunk::getPhysicalSize(){
+ long Chunk::getPhysicalSize() const{
ScopedDbConnection conn( getShard() );
BSONObj result;
@@ -283,7 +304,7 @@ namespace mongo {
}
- long Chunk::countObjects( const BSONObj& filter ){
+ long Chunk::countObjects( const BSONObj& filter ) const{
ScopedDbConnection conn( getShard() );
BSONObj f = getFilter();
@@ -297,14 +318,14 @@ namespace mongo {
return (long)n;
}
- bool Chunk::operator==( const Chunk& s ){
+ bool Chunk::operator==( const Chunk& s ) const{
return
_manager->getShardKey().compare( _min , s._min ) == 0 &&
_manager->getShardKey().compare( _max , s._max ) == 0
;
}
- void Chunk::getFilter( BSONObjBuilder& b ){
+ void Chunk::getFilter( BSONObjBuilder& b ) const{
_manager->_key.getFilter( b , _min , _max );
}
@@ -383,7 +404,7 @@ namespace mongo {
}
- ShardKeyPattern Chunk::skey(){
+ ShardKeyPattern Chunk::skey() const{
return _manager->getShardKey();
}
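Two notes on this file. First, both the old and new size constants carry the literal 1204 where 1024 was presumably intended, so the new ceiling is roughly 235MB rather than an even 200MB. Second, pickSplitPoint now works around medianKey degenerating when a chunk's low end is packed with one repeated key value: if the reported median equals the chunk minimum, it skips past that value with a $gt probe ordered by the shard key. In shell terms the probe is roughly the following (single-field shard key k assumed, matching the compound-key TODO; minVal stands in for the chunk minimum):

    // illustrative equivalent of the median==min fallback, for shard key { k : 1 }
    db.coll.find( { k : { $gt : minVal } } ).sort( { k : 1 } ).limit( 1 );

The autosplit path then declines to split when the probe comes back empty or equal to either bound, where it previously only compared _min against _max, and splitting now asserts (13003) that the chosen middle is non-empty and strictly inside the chunk.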
diff --git a/s/chunk.h b/s/chunk.h
index 7395133..25502e4 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -61,27 +61,27 @@ namespace mongo {
_max = o;
}
- string getShard(){
+ string getShard() const{
return _shard;
}
void setShard( string shard );
- bool contains( const BSONObj& obj );
+ bool contains( const BSONObj& obj ) const;
string toString() const;
operator string() const { return toString(); }
- bool operator==(const Chunk& s);
+ bool operator==(const Chunk& s) const;
- bool operator!=(const Chunk& s){
+ bool operator!=(const Chunk& s) const{
return ! ( *this == s );
}
- void getFilter( BSONObjBuilder& b );
- BSONObj getFilter(){ BSONObjBuilder b; getFilter( b ); return b.obj(); }
+ void getFilter( BSONObjBuilder& b ) const;
+ BSONObj getFilter() const{ BSONObjBuilder b; getFilter( b ); return b.obj(); }
- BSONObj pickSplitPoint();
+ BSONObj pickSplitPoint() const;
Chunk * split();
Chunk * split( const BSONObj& middle );
@@ -89,9 +89,9 @@ namespace mongo {
* @return size of shard in bytes
* talks to mongod to do this
*/
- long getPhysicalSize();
+ long getPhysicalSize() const;
- long countObjects( const BSONObj& filter = BSONObj() );
+ long countObjects( const BSONObj& filter = BSONObj() ) const;
/**
* if the amount of data written nears the max size of a shard
@@ -119,14 +119,14 @@ namespace mongo {
void _markModified();
- static long MaxChunkSize;
+ static int MaxChunkSize;
private:
// main shard info
ChunkManager * _manager;
- ShardKeyPattern skey();
+ ShardKeyPattern skey() const;
string _ns;
BSONObj _min;
@@ -218,4 +218,22 @@ namespace mongo {
static unsigned long long NextSequenceNumber;
};
+ // like BSONObjCmp. for use as an STL comparison functor
+ // key-order in "order" argument must match key-order in shardkey
+ class ChunkCmp {
+ public:
+ ChunkCmp( const BSONObj &order = BSONObj() ) : _cmp( order ) {}
+ bool operator()( const Chunk &l, const Chunk &r ) const {
+ return _cmp(l.getMin(), r.getMin());
+ }
+
+ bool operator()( const Chunk *l, const Chunk *r ) const {
+ return operator()(*l, *r);
+ }
+ private:
+ BSONObjCmp _cmp;
+ };
+
+
+
} // namespace mongo
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index e79b529..0b2baa0 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -54,6 +54,9 @@ namespace mongo {
virtual bool adminOnly() {
return true;
}
+
+ // all grid commands are designed not to lock
+ virtual LockType locktype(){ return NONE; }
};
// --------------- misc commands ----------------------
@@ -506,8 +509,7 @@ namespace mongo {
if ( host == "localhost" || host.find( "localhost:" ) == 0 ||
host == "127.0.0.1" || host.find( "127.0.0.1:" ) == 0 ){
- if ( cmdObj["allowLocal"].type() != Bool ||
- ! cmdObj["allowLocal"].boolean() ){
+ if ( ! cmdObj["allowLocal"].trueValue() ){
errmsg =
"can't use localhost as a shard since all shards need to communicate. "
"allowLocal to override for testing";
@@ -586,6 +588,7 @@ namespace mongo {
class IsDbGridCmd : public Command {
public:
+ virtual LockType locktype(){ return NONE; }
virtual bool slaveOk() {
return true;
}
@@ -599,6 +602,7 @@ namespace mongo {
class CmdIsMaster : public Command {
public:
+ virtual LockType locktype(){ return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() {
return true;
@@ -616,6 +620,7 @@ namespace mongo {
class CmdShardingGetPrevError : public Command {
public:
+ virtual LockType locktype(){ return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() {
return true;
@@ -632,6 +637,7 @@ namespace mongo {
class CmdShardingGetLastError : public Command {
public:
+ virtual LockType locktype(){ return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() {
return true;
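Besides marking the grid commands lock-free, this file loosens the localhost check: trueValue() accepts any truthy allowLocal (true, 1, and so on) where the old code demanded a strict boolean. Invocation from the shell is unchanged; a sketch with an illustrative port:

    // adding a localhost shard still requires the explicit testing override
    db.getSisterDB( "admin" ).runCommand( { addshard : "localhost:27018", allowLocal : true } );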
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index 2d3de7a..649d7d1 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -42,6 +42,10 @@ namespace mongo {
virtual bool adminOnly() {
return false;
}
+
+ // all grid commands are designed not to lock
+ virtual LockType locktype(){ return NONE; }
+
protected:
string getDBName( string ns ){
return ns.substr( 0 , ns.size() - 5 );
@@ -173,6 +177,160 @@ namespace mongo {
}
} countCmd;
+ class CollectionStats : public PublicGridCommand {
+ public:
+ CollectionStats() : PublicGridCommand("collstats") { }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string dbName = getDBName( ns );
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ DBConfig * conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+ result.appendBool("sharded", false);
+ return passthrough( conf , cmdObj , result);
+ }
+ result.appendBool("sharded", true);
+
+ ChunkManager * cm = conf->getChunkManager( fullns );
+ massert( 12594 , "how could chunk manager be null!" , cm );
+
+ set<string> servers;
+ cm->getAllServers(servers);
+
+ BSONObjBuilder shardStats;
+ long long count=0;
+ long long size=0;
+ long long storageSize=0;
+ int nindexes=0;
+ for ( set<string>::iterator i=servers.begin(); i!=servers.end(); i++ ){
+ ScopedDbConnection conn( *i );
+ BSONObj res;
+ if ( ! conn->runCommand( dbName , cmdObj , res ) ){
+ errmsg = "failed on shard: " + res.toString();
+ return false;
+ }
+ conn.done();
+
+ count += res["count"].numberLong();
+ size += res["size"].numberLong();
+ storageSize += res["storageSize"].numberLong();
+
+ if (nindexes)
+ massert(12595, "nindexes should be the same on all shards!", nindexes == res["nindexes"].numberInt());
+ else
+ nindexes = res["nindexes"].numberInt();
+
+ shardStats.append(*i, res);
+ }
+
+ result.append("ns", fullns);
+ result.appendNumber("count", count);
+ result.appendNumber("size", size);
+ result.appendNumber("storageSize", storageSize);
+ result.append("nindexes", nindexes);
+
+ result.append("nchunks", cm->numChunks());
+ result.append("shards", shardStats.obj());
+
+ return true;
+ }
+ } collectionStatsCmd;
+
+ class FindAndModifyCmd : public PublicGridCommand {
+ public:
+ FindAndModifyCmd() : PublicGridCommand("findandmodify") { }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string dbName = getDBName( ns );
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ BSONObj filter = cmdObj.getObjectField("query");
+
+ DBConfig * conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+ return passthrough( conf , cmdObj , result);
+ }
+
+ ChunkManager * cm = conf->getChunkManager( fullns );
+ massert( 13002 , "how could chunk manager be null!" , cm );
+
+ vector<Chunk*> chunks;
+ cm->getChunksForQuery( chunks , filter );
+
+ BSONObj sort = cmdObj.getObjectField("sort");
+ if (!sort.isEmpty()){
+ ShardKeyPattern& sk = cm->getShardKey();
+ {
+ BSONObjIterator k (sk.key());
+ BSONObjIterator s (sort);
+ bool good = true;
+ while (k.more()){
+ if (!s.more()){
+ good = false;
+ break;
+ }
+
+ BSONElement ke = k.next();
+ BSONElement se = s.next();
+
+ // TODO consider values when we support compound keys
+ if (strcmp(ke.fieldName(), se.fieldName()) != 0){
+ good = false;
+ break;
+ }
+ }
+
+ uassert(13001, "Sort must match shard key for sharded findandmodify", good);
+ }
+
+ std::sort(chunks.begin(), chunks.end(), ChunkCmp(sort));
+ }
+
+ for ( vector<Chunk*>::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
+ Chunk * c = *i;
+
+ ScopedDbConnection conn( c->getShard() );
+ BSONObj res;
+ bool ok = conn->runCommand( conf->getName() , fixCmdObj(cmdObj, c) , res );
+ conn.done();
+
+ if (ok || (strcmp(res["errmsg"].valuestrsafe(), "No matching object found") != 0)){
+ result.appendElements(res);
+ return ok;
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ BSONObj fixCmdObj(const BSONObj& cmdObj, const Chunk* chunk){
+ assert(chunk);
+
+ BSONObjBuilder b;
+ BSONObjIterator i(cmdObj);
+ bool foundQuery = false;
+ while (i.more()){
+ BSONElement e = i.next();
+ if (strcmp(e.fieldName(), "query") != 0){
+ b.append(e);
+ }else{
+ foundQuery = true;
+ b.append("query", ClusteredCursor::concatQuery(e.embeddedObjectUserCheck(), chunk->getFilter()));
+ }
+ }
+
+ if (!foundQuery)
+ b.append("query", chunk->getFilter());
+
+ return b.obj();
+ }
+
+ } findAndModifyCmd;
+
class ConvertToCappedCmd : public NotAllowedOnShardedCollectionCmd {
public:
ConvertToCappedCmd() : NotAllowedOnShardedCollectionCmd("convertToCapped"){}
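Both new mongos commands are invoked exactly as on a plain mongod. collstats fans out to every shard and sums count, size, and storageSize (asserting that nindexes agrees everywhere), while findandmodify walks candidate chunks in sort order, which is why a supplied sort must line up with the shard key. A sketch, with foo and shardkey as illustrative names:

    var stats = db.runCommand( { collstats : "foo" } );  // stats.sharded, plus per-shard detail in stats.shards
    db.runCommand( { findandmodify : "foo",
                     query  : { state : "new" },
                     sort   : { shardkey : 1 },          // field names must match the shard key
                     update : { $set : { state : "taken" } } } );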
diff --git a/s/config.cpp b/s/config.cpp
index 0bfb5a3..c3c3668 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -129,6 +129,8 @@ namespace mongo {
void DBConfig::unserialize(const BSONObj& from){
_name = from.getStringField("name");
+ log(1) << "DBConfig unserialize: " << _name << " " << from << endl;
+
_shardingEnabled = from.getBoolField("partitioned");
_primary = from.getStringField("primary");
@@ -297,7 +299,7 @@ namespace mongo {
if ( database == "config" )
return &configServer;
- boostlock l( _lock );
+ scoped_lock l( _lock );
DBConfig*& cc = _databases[database];
if ( cc == 0 ){
@@ -333,7 +335,7 @@ namespace mongo {
void Grid::removeDB( string database ){
uassert( 10186 , "removeDB expects db name" , database.find( '.' ) == string::npos );
- boostlock l( _lock );
+ scoped_lock l( _lock );
_databases.erase( database );
}
@@ -369,30 +371,35 @@ namespace mongo {
}
ourHostname = hn;
+ stringstream fullString;
+
set<string> hosts;
for ( size_t i=0; i<configHosts.size(); i++ ){
string host = configHosts[i];
hosts.insert( getHost( host , false ) );
configHosts[i] = getHost( host , true );
+ if ( i > 0 )
+ fullString << ",";
+ fullString << configHosts[i];
}
-
+
for ( set<string>::iterator i=hosts.begin(); i!=hosts.end(); i++ ){
string host = *i;
bool ok = false;
- for ( int x=0; x<10; x++ ){
+ for ( int x=10; x>0; x-- ){
if ( ! hostbyname( host.c_str() ).empty() ){
ok = true;
break;
}
- log() << "can't resolve DNS for [" << host << "] sleeping and trying " << (10-x) << " more times" << endl;
+ log() << "can't resolve DNS for [" << host << "] sleeping and trying " << x << " more times" << endl;
sleepsecs( 10 );
}
if ( ! ok )
return false;
}
- uassert( 10188 , "can only hand 1 config db right now" , configHosts.size() == 1 );
- _primary = configHosts[0];
+ _primary = fullString.str();
+ log(1) << " config string : " << fullString.str() << endl;
return true;
}
@@ -448,7 +455,7 @@ namespace mongo {
if ( cur == 0 ){
ScopedDbConnection conn( _primary );
- conn->insert( "config.version" , BSON( "version" << VERSION ) );
+ conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
pool.flush();
assert( VERSION == dbConfigVersion( conn.conn() ) );
conn.done();
@@ -459,6 +466,32 @@ namespace mongo {
return -8;
}
+ void ConfigServer::reloadSettings(){
+ set<string> got;
+
+ ScopedDbConnection conn( _primary );
+ auto_ptr<DBClientCursor> c = conn->query( "config.settings" , BSONObj() );
+ while ( c->more() ){
+ BSONObj o = c->next();
+ string name = o["_id"].valuestrsafe();
+ got.insert( name );
+ if ( name == "chunksize" ){
+ log(1) << "MaxChunkSize: " << o["value"] << endl;
+ Chunk::MaxChunkSize = o["value"].numberInt() * 1024 * 1024;
+ }
+ else {
+ log() << "warning: unknown setting [" << name << "]" << endl;
+ }
+ }
+
+ if ( ! got.count( "chunksize" ) ){
+ conn->insert( "config.settings" , BSON( "_id" << "chunksize" <<
+ "value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
+ }
+
+ conn.done();
+ }
+
string ConfigServer::getHost( string name , bool withPort ){
if ( name.find( ":" ) ){
if ( withPort )
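reloadSettings makes the split threshold tunable through the config database: a config.settings document with _id "chunksize" holds the value in megabytes, and one is seeded from Chunk::MaxChunkSize when missing. Judging from this hunk the value is read when settings are reloaded, not continuously; to change it (100 is an illustrative number):

    // connected through mongos; value is in MB
    db.getSisterDB( "config" ).settings.save( { _id : "chunksize", value : 100 } );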
diff --git a/s/config.h b/s/config.h
index 16aa67a..3b0dc4c 100644
--- a/s/config.h
+++ b/s/config.h
@@ -151,7 +151,7 @@ namespace mongo {
unsigned long long getNextOpTime() const;
private:
map<string,DBConfig*> _databases;
- boost::mutex _lock; // TODO: change to r/w lock
+ mongo::mutex _lock; // TODO: change to r/w lock
};
class ConfigServer : public DBConfig {
@@ -180,6 +180,8 @@ namespace mongo {
int dbConfigVersion();
int dbConfigVersion( DBClientBase& conn );
+
+ void reloadSettings();
/**
* @return 0 = ok, otherwise error #
diff --git a/s/cursors.cpp b/s/cursors.cpp
index 23b8eaf..a1c9dfa 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -1,4 +1,20 @@
// cursors.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#include "stdafx.h"
#include "cursors.h"
diff --git a/s/cursors.h b/s/cursors.h
index b1ed4b0..a61bed3 100644
--- a/s/cursors.h
+++ b/s/cursors.h
@@ -1,4 +1,20 @@
// cursors.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#pragma once
diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index cc627eb..2a9cde3 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -79,6 +79,7 @@ namespace mongo {
class WriteBackCommand : public MongodShardCommand {
public:
+ virtual LockType locktype(){ return NONE; }
WriteBackCommand() : MongodShardCommand( "writebacklisten" ){}
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
@@ -90,8 +91,6 @@ namespace mongo {
const OID id = e.__oid();
- dbtemprelease unlock;
-
if ( ! clientQueues[id.str()] )
clientQueues[id.str()] = new BlockingQueue<BSONObj>();
@@ -114,6 +113,8 @@ namespace mongo {
help << " example: { setShardVersion : 'alleyinsider.foo' , version : 1 , configdb : '' } ";
}
+ virtual LockType locktype(){ return WRITE; } // TODO: figure out how to make this not need to lock
+
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
bool authoritative = cmdObj.getBoolField( "authoritative" );
@@ -247,6 +248,8 @@ namespace mongo {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
+ virtual LockType locktype(){ return WRITE; } // TODO: figure out how to make this not need to lock
+
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
string ns = cmdObj["getShardVersion"].valuestrsafe();
if ( ns.size() == 0 ){
@@ -273,6 +276,8 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
+
+ virtual LockType locktype(){ return WRITE; }
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
// so i have to start clone, tell caller its ok to make change
@@ -342,6 +347,8 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
+
+ virtual LockType locktype(){ return WRITE; }
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
// see MoveShardStartCommand::run
diff --git a/s/d_logic.h b/s/d_logic.h
index 3e483c4..e426cb2 100644
--- a/s/d_logic.h
+++ b/s/d_logic.h
@@ -1,4 +1,20 @@
// d_logic.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#pragma once
diff --git a/s/dbgrid.vcproj b/s/dbgrid.vcproj
index 2c8ef85..06e6c32 100644
--- a/s/dbgrid.vcproj
+++ b/s/dbgrid.vcproj
@@ -42,7 +42,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="&quot;..\pcre-7.4&quot;;&quot;C:\Program Files\boost\boost_1_35_0&quot;"
- PreprocessorDefinitions="USE_ASIO;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC"
+ PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -433,6 +433,10 @@
>
</File>
<File
+ RelativePath="..\db\cmdline.cpp"
+ >
+ </File>
+ <File
RelativePath="..\db\commands.cpp"
>
</File>
@@ -497,40 +501,8 @@
>
</File>
<File
- RelativePath="..\util\message_server_asio.cpp"
+ RelativePath="..\util\message_server_port.cpp"
>
- <FileConfiguration
- Name="Debug|Win32"
- >
- <Tool
- Name="VCCLCompilerTool"
- UsePrecompiledHeader="0"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Release|Win32"
- >
- <Tool
- Name="VCCLCompilerTool"
- UsePrecompiledHeader="0"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="release_nojni|Win32"
- >
- <Tool
- Name="VCCLCompilerTool"
- UsePrecompiledHeader="0"
- />
- </FileConfiguration>
- <FileConfiguration
- Name="Debug Recstore|Win32"
- >
- <Tool
- Name="VCCLCompilerTool"
- UsePrecompiledHeader="0"
- />
- </FileConfiguration>
</File>
<File
RelativePath="..\db\nonce.cpp"
@@ -545,6 +517,10 @@
>
</File>
<File
+ RelativePath="..\client\syncclusterconnection.cpp"
+ >
+ </File>
+ <File
RelativePath="..\util\thread_pool.cpp"
>
</File>
diff --git a/s/request.cpp b/s/request.cpp
index 8bebd64..02ada3c 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -74,7 +74,7 @@ namespace mongo {
void Request::process( int attempt ){
- log(2) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.data->id) << " attempt: " << attempt << endl;
+ log(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.data->id) << " attempt: " << attempt << endl;
int op = _m.data->operation();
assert( op > dbMsg );
@@ -118,7 +118,7 @@ namespace mongo {
}
ClientInfo::~ClientInfo(){
- boostlock lk( _clientsLock );
+ scoped_lock lk( _clientsLock );
ClientCache::iterator i = _clients.find( _id );
if ( i != _clients.end() ){
_clients.erase( i );
@@ -157,7 +157,7 @@ namespace mongo {
return info;
}
- boostlock lk( _clientsLock );
+ scoped_lock lk( _clientsLock );
ClientCache::iterator i = _clients.find( clientId );
if ( i != _clients.end() )
return i->second;
@@ -169,7 +169,7 @@ namespace mongo {
}
map<int,ClientInfo*> ClientInfo::_clients;
- boost::mutex ClientInfo::_clientsLock;
+ mongo::mutex ClientInfo::_clientsLock;
boost::thread_specific_ptr<ClientInfo> ClientInfo::_tlInfo;
} // namespace mongo
diff --git a/s/request.h b/s/request.h
index 689216c..2c02724 100644
--- a/s/request.h
+++ b/s/request.h
@@ -1,4 +1,20 @@
// request.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#pragma once
@@ -111,7 +127,7 @@ namespace mongo {
set<string> * _prev;
int _lastAccess;
- static boost::mutex _clientsLock;
+ static mongo::mutex _clientsLock;
static ClientCache _clients;
static boost::thread_specific_ptr<ClientInfo> _tlInfo;
};
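
[editor's note] This file and s/request.cpp above are part of a tree-wide
swap of boost::mutex/boostlock for the project's own mongo::mutex and
scoped_lock wrappers. A minimal sketch of the resulting idiom, assuming the
wrappers are RAII locks over the boost primitives (names below are
illustrative):

    static mongo::mutex _cacheLock;
    static map<int,ClientInfo*> _cache;

    ClientInfo * findClient( int id ){
        scoped_lock lk( _cacheLock );   // lock acquired here
        map<int,ClientInfo*>::iterator i = _cache.find( id );
        return i == _cache.end() ? 0 : i->second;
    }                                   // and released automatically here
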
diff --git a/s/s_only.cpp b/s/s_only.cpp
index d692ff2..78310fd 100644
--- a/s/s_only.cpp
+++ b/s/s_only.cpp
@@ -18,6 +18,7 @@
#include "../stdafx.h"
#include "../client/dbclient.h"
#include "../db/dbhelpers.h"
+#include "../db/matcher.h"
namespace mongo {
@@ -26,4 +27,7 @@ namespace mongo {
auto_ptr<CursorIterator> i;
return i;
}
+
+ // need this stub to reduce mongos link dependencies
+ inline Matcher::~Matcher() { assert(!"this shouldn't be called"); }
}
diff --git a/s/server.cpp b/s/server.cpp
index 4868caf..3644376 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -30,10 +30,10 @@
namespace mongo {
Database *database = 0;
+ string mongosCommand;
string ourHostname;
OID serverID;
bool dbexitCalled = false;
- CmdLine cmdLine;
bool inShutdown(){
return dbexitCalled;
@@ -47,12 +47,12 @@ namespace mongo {
assert( 0 );
return false;
}
-
+
void usage( char * argv[] ){
out() << argv[0] << " usage:\n\n";
- out() << " -v+ verbose\n";
+ out() << " -v+ verbose 1: general 2: more 3: per request 4: more\n";
out() << " --port <portno>\n";
- out() << " --configdb <configdbname> [<configdbname>...]\n";
+ out() << " --configdb <configdbname>,[<configdbname>,<configdbname>]\n";
out() << endl;
}
@@ -88,10 +88,20 @@ namespace mongo {
}
}
};
+
+ void sighandler(int sig){
+ dbexit(EXIT_CLEAN, (string("received signal ") + BSONObjBuilder::numStr(sig)).c_str());
+ }
+ void setupSignals(){
+ // needed for cmdLine, but we do it in init()
+ }
+
void init(){
serverID.init();
setupSIGTRAPforGDB();
+ signal(SIGTERM, sighandler);
+ signal(SIGINT, sighandler);
}
void start() {
@@ -108,55 +118,83 @@ namespace mongo {
return 0;
}
+ void printShardingVersionInfo(){
+ log() << mongosCommand << " v0.3 (alpha 3) starting (--help for usage)" << endl;
+ printGitVersion();
+ printSysInfo();
+ }
+
} // namespace mongo
using namespace mongo;
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
int main(int argc, char* argv[], char *envp[] ) {
+ static StaticObserver staticObserver;
+ mongosCommand = argv[0];
+
+ po::options_description options("Sharding options");
+ po::options_description hidden("Hidden options");
+ po::positional_options_description positional;
- bool justTests = false;
- vector<string> configdbs;
+ CmdLine::addGlobalOptions( options , hidden );
- for (int i = 1; i < argc; i++) {
- if ( argv[i] == 0 ) continue;
- string s = argv[i];
- if ( s == "--port" ) {
- cmdLine.port = atoi(argv[++i]);
- }
- else if ( s == "--configdb" ) {
-
- while ( ++i < argc )
- configdbs.push_back(argv[i]);
-
- if ( configdbs.size() == 0 ) {
- out() << "error: no args for --configdb\n";
- return 4;
- }
-
- if ( configdbs.size() > 2 ) {
- out() << "error: --configdb does not support more than 2 parameters yet\n";
- return 5;
- }
- }
- else if ( s.find( "-v" ) == 0 ){
- logLevel = s.size() - 1;
- }
- else if ( s == "--test" ) {
- justTests = true;
- logLevel = 5;
- }
- else {
- usage( argv );
- return 3;
- }
- }
+ options.add_options()
+ ( "configdb" , po::value<string>() , "1 or 3 comma separated config servers" )
+ ( "test" , "just run unit tests" )
+ ;
+
+
+ // parse options
+ po::variables_map params;
+ if ( ! CmdLine::store( argc , argv , options , hidden , positional , params ) )
+ return 0;
- if ( justTests ){
+ if ( params.count( "help" ) ){
+ cout << options << endl;
+ return 0;
+ }
+
+ if ( params.count( "version" ) ){
+ printShardingVersionInfo();
+ return 0;
+ }
+
+
+ if ( params.count( "test" ) ){
+ logLevel = 5;
UnitTest::runTests();
cout << "tests passed" << endl;
return 0;
}
+ if ( ! params.count( "configdb" ) ){
+ out() << "error: no args for --configdb" << endl;
+ return 4;
+ }
+
+ vector<string> configdbs;
+ {
+ string s = params["configdb"].as<string>();
+ while ( true ){
+ size_t idx = s.find( ',' );
+ if ( idx == string::npos ){
+ configdbs.push_back( s );
+ break;
+ }
+ configdbs.push_back( s.substr( 0 , idx ) );
+ s = s.substr( idx + 1 );
+ }
+ }
+
+ if ( configdbs.size() != 1 && configdbs.size() != 3 ){
+ out() << "need either 1 or 3 configdbs" << endl;
+ return 5;
+ }
+
pool.addHook( &shardingConnectionHook );
if ( argc <= 1 ) {
@@ -170,24 +208,26 @@ int main(int argc, char* argv[], char *envp[] ) {
usage( argv );
return 1;
}
-
- log() << argv[0] << " v0.3- (alpha 3t) starting (--help for usage)" << endl;
- printGitVersion();
- printSysInfo();
+
+ printShardingVersionInfo();
if ( ! configServer.init( configdbs ) ){
cout << "couldn't connectd to config db" << endl;
return 7;
}
- assert( configServer.ok() );
+ if ( ! configServer.ok() ){
+ cout << "configServer startup check failed" << endl;
+ return 8;
+ }
int configError = configServer.checkConfigVersion();
if ( configError ){
cout << "config server error: " << configError << endl;
return configError;
}
-
+ configServer.reloadSettings();
+
init();
start();
dbexit( EXIT_CLEAN );
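
[editor's note] --configdb now takes one comma-separated argument instead of
a variadic list, and mongos requires exactly 1 or 3 config servers. A
standalone sketch of the same split, assuming only the standard library
(splitCommas is illustrative, not a name from the patch):

    #include <string>
    #include <vector>
    using namespace std;

    vector<string> splitCommas( string s ){
        vector<string> out;
        while ( true ){
            size_t idx = s.find( ',' );
            if ( idx == string::npos ){ out.push_back( s ); break; }
            out.push_back( s.substr( 0 , idx ) );
            s = s.substr( idx + 1 );
        }
        return out;
    }

    // splitCommas( "cfg1,cfg2,cfg3" ) -> 3 entries: accepted
    // splitCommas( "cfg1,cfg2" )      -> 2 entries: rejected (exit code 5)
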
diff --git a/s/strategy.cpp b/s/strategy.cpp
index b485bd2..b7277e3 100644
--- a/s/strategy.cpp
+++ b/s/strategy.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// strategy.cpp
#include "stdafx.h"
@@ -25,14 +41,12 @@ namespace mongo {
void Strategy::doQuery( Request& r , string server ){
try{
ScopedDbConnection dbcon( server );
- DBClientBase &_c = dbcon.conn();
+ DBClientBase &c = dbcon.conn();
- checkShardVersion( _c , r.getns() );
+ checkShardVersion( c , r.getns() );
- // TODO: This will not work with Paired connections. Fix.
- DBClientConnection&c = dynamic_cast<DBClientConnection&>(_c);
Message response;
- bool ok = c.port().call( r.m(), response);
+ bool ok = c.call( r.m(), response);
{
QueryResult *qr = (QueryResult *) response.data;
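
[editor's note] Dropping the dynamic_cast above sends the raw message through
the connection's call() rather than a concrete DBClientConnection's port, so
paired (and later sync-cluster) connections can serve the query too. A sketch
of the shape this enables, assuming call() is virtual on the connection base
class as the patch's own usage implies:

    void sendRaw( DBClientBase& c , Message& request , Message& response ){
        bool ok = c.call( request , response );  // virtual dispatch, no cast
        assert( ok );
    }
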
diff --git a/s/strategy.h b/s/strategy.h
index e4b93b5..a656f60 100644
--- a/s/strategy.h
+++ b/s/strategy.h
@@ -1,4 +1,20 @@
// strategy.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#pragma once
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 34cf226..9107f16 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// strategy_shard.cpp
#include "stdafx.h"
diff --git a/s/strategy_single.cpp b/s/strategy_single.cpp
index 9cf8a63..8f157d5 100644
--- a/s/strategy_single.cpp
+++ b/s/strategy_single.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// strategy_single.cpp
#include "stdafx.h"
diff --git a/s/util.h b/s/util.h
index ba40a29..2c62642 100644
--- a/s/util.h
+++ b/s/util.h
@@ -35,6 +35,7 @@ namespace mongo {
stringstream s;
s << "StaleConfigException ns: " << ns << " " << msg;
_msg = s.str();
+ log(1) << _msg << endl;
}
virtual ~StaleConfigException() throw(){}
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index dc088fb..cc245b6 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -164,9 +164,12 @@ namespace mongo {
_loadedVersion = _lastVersion;
string coll = _localDBName + ".system.js";
-
+
static DBClientBase * db = createDirectClient();
auto_ptr<DBClientCursor> c = db->query( coll , Query() );
+
+ set<string> thisTime;
+
while ( c->more() ){
BSONObj o = c->next();
@@ -177,6 +180,26 @@ namespace mongo {
uassert( 10210 , "value has to be set" , v.type() != EOO );
setElement( n.valuestr() , v );
+
+ thisTime.insert( n.valuestr() );
+ _storedNames.insert( n.valuestr() );
+
+ }
+
+ // --- remove things from scope that were removed
+
+ list<string> toremove;
+
+ for ( set<string>::iterator i=_storedNames.begin(); i!=_storedNames.end(); i++ ){
+ string n = *i;
+ if ( thisTime.count( n ) == 0 )
+ toremove.push_back( n );
+ }
+
+ for ( list<string>::iterator i=toremove.begin(); i!=toremove.end(); i++ ){
+ string n = *i;
+ _storedNames.erase( n );
+ execSetup( (string)"delete " + n , "clean up scope" );
}
}
@@ -220,7 +243,7 @@ namespace mongo {
}
void done( const string& pool , Scope * s ){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
list<Scope*> & l = _pools[pool];
if ( l.size() > 10 ){
delete s;
@@ -232,7 +255,7 @@ namespace mongo {
}
Scope * get( const string& pool ){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
list<Scope*> & l = _pools[pool];
if ( l.size() == 0 )
return 0;
@@ -260,7 +283,7 @@ namespace mongo {
private:
PoolToScopes _pools;
- boost::mutex _mutex;
+ mongo::mutex _mutex;
int _magic;
};
@@ -395,5 +418,8 @@ namespace mongo {
}
}
+ void ( *ScriptEngine::_connectCallback )( DBClientWithCommands & ) = 0;
+
ScriptEngine * globalScriptEngine;
}
+ \ No newline at end of file
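
[editor's note] The loadStored changes above make reloads subtractive as well
as additive: names seen this pass are recorded, and names remembered from an
earlier pass but now gone from system.js are deleted from the scope. A
standalone sketch of that reconciliation, standard library only (reconcile
and the cout stand-in for execSetup are illustrative):

    #include <iostream>
    #include <list>
    #include <set>
    #include <string>
    using namespace std;

    void reconcile( set<string>& stored , const set<string>& thisTime ){
        list<string> toremove;
        for ( set<string>::const_iterator i = stored.begin(); i != stored.end(); ++i )
            if ( thisTime.count( *i ) == 0 )
                toremove.push_back( *i );        // present before, absent now
        for ( list<string>::iterator i = toremove.begin(); i != toremove.end(); ++i ){
            stored.erase( *i );
            cout << "delete " << *i << endl;     // stands in for execSetup()
        }
    }
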
diff --git a/scripting/engine.h b/scripting/engine.h
index 99c88cf..9907d31 100644
--- a/scripting/engine.h
+++ b/scripting/engine.h
@@ -26,7 +26,7 @@ namespace mongo {
typedef unsigned long long ScriptingFunction;
typedef BSONObj (*NativeFunction) ( const BSONObj &args );
-
+
class Scope : boost::noncopyable {
public:
Scope();
@@ -111,12 +111,17 @@ namespace mongo {
string _localDBName;
long long _loadedVersion;
+ set<string> _storedNames;
static long long _lastVersion;
map<string,ScriptingFunction> _cachedFunctions;
static int _numScopes;
};
+ void installGlobalUtils( Scope& scope );
+
+ class DBClientWithCommands;
+
class ScriptEngine : boost::noncopyable {
public:
ScriptEngine();
@@ -126,6 +131,7 @@ namespace mongo {
Scope *s = createScope();
if ( s && _scopeInitCallback )
_scopeInitCallback( *s );
+ installGlobalUtils( *s );
return s;
}
@@ -142,12 +148,18 @@ namespace mongo {
virtual auto_ptr<Unlocker> newThreadUnlocker() { return auto_ptr< Unlocker >( new Unlocker ); }
void setScopeInitCallback( void ( *func )( Scope & ) ) { _scopeInitCallback = func; }
+ static void setConnectCallback( void ( *func )( DBClientWithCommands& ) ) { _connectCallback = func; }
+ static void runConnectCallback( DBClientWithCommands &c ) {
+ if ( _connectCallback )
+ _connectCallback( c );
+ }
protected:
virtual Scope * createScope() = 0;
private:
void ( *_scopeInitCallback )( Scope & );
+ static void ( *_connectCallback )( DBClientWithCommands & );
};
extern ScriptEngine * globalScriptEngine;
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index d75a734..6609925 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -32,6 +32,11 @@
return JS_FALSE; \
}
+#define CHECKNEWOBJECT(xx,ctx,w) \
+ if ( ! xx ){ \
+ massert(13072,(string)"JS_NewObject failed: " + w ,xx); \
+ }
+
namespace mongo {
string trim( string s ){
@@ -45,8 +50,8 @@ namespace mongo {
}
boost::thread_specific_ptr<SMScope> currentScope( dontDeleteScope );
- boost::recursive_mutex smmutex;
-#define smlock recursive_boostlock ___lk( smmutex );
+ boost::recursive_mutex &smmutex = *( new boost::recursive_mutex );
+#define smlock recursive_scoped_lock ___lk( smmutex );
#define GETHOLDER(x,o) ((BSONHolder*)JS_GetPrivate( x , o ))
@@ -158,6 +163,19 @@ namespace mongo {
return toString( JS_ValueToString( _context , v ) );
}
+ // NOTE No validation of passed in object
+ long long toNumberLongUnsafe( JSObject *o ) {
+ boost::uint64_t val;
+ if ( hasProperty( o, "top" ) ) {
+ val =
+ ( (boost::uint64_t)(boost::uint32_t)getNumber( o , "top" ) << 32 ) +
+ ( boost::uint32_t)( getNumber( o , "bottom" ) );
+ } else {
+ val = (boost::uint64_t) getNumber( o, "floatApprox" );
+ }
+ return val;
+ }
+
double toNumber( jsval v ){
double d;
uassert( 10214 , "not a number" , JS_ValueToNumber( _context , v , &d ) );
@@ -180,7 +198,7 @@ namespace mongo {
return oid;
}
- BSONObj toObject( JSObject * o ){
+ BSONObj toObject( JSObject * o , int depth = 0){
if ( ! o )
return BSONObj();
@@ -204,9 +222,11 @@ namespace mongo {
if ( ! appendSpecialDBObject( this , b , "value" , OBJECT_TO_JSVAL( o ) , o ) ){
- jsval theid = getProperty( o , "_id" );
- if ( ! JSVAL_IS_VOID( theid ) ){
- append( b , "_id" , theid );
+ if ( depth == 0 ){
+ jsval theid = getProperty( o , "_id" );
+ if ( ! JSVAL_IS_VOID( theid ) ){
+ append( b , "_id" , theid , EOO , depth + 1 );
+ }
}
JSIdArray * properties = JS_Enumerate( _context , o );
@@ -217,10 +237,10 @@ namespace mongo {
jsval nameval;
assert( JS_IdToValue( _context ,id , &nameval ) );
string name = toString( nameval );
- if ( name == "_id" )
+ if ( depth == 0 && name == "_id" )
continue;
- append( b , name , getProperty( o , name.c_str() ) , orig[name].type() );
+ append( b , name , getProperty( o , name.c_str() ) , orig[name].type() , depth + 1 );
}
JS_DestroyIdArray( _context , properties );
@@ -254,7 +274,7 @@ namespace mongo {
b.appendRegex( name.c_str() , s.substr( 0 , end ).c_str() , s.substr( end + 1 ).c_str() );
}
- void append( BSONObjBuilder& b , string name , jsval val , BSONType oldType = EOO ){
+ void append( BSONObjBuilder& b , string name , jsval val , BSONType oldType = EOO , int depth=0 ){
//cout << "name: " << name << "\t" << typeString( val ) << " oldType: " << oldType << endl;
switch ( JS_TypeOfValue( _context , val ) ){
@@ -278,7 +298,7 @@ namespace mongo {
b.appendNull( name.c_str() );
}
else if ( ! appendSpecialDBObject( this , b , name , val , o ) ){
- BSONObj sub = toObject( o );
+ BSONObj sub = toObject( o , depth );
if ( JS_IsArrayObject( _context , o ) ){
b.appendArray( name.c_str() , sub );
}
@@ -389,12 +409,12 @@ namespace mongo {
paramString = trim( paramString );
}
- const char ** paramArray = new const char*[params.size()];
+ boost::scoped_array<const char *> paramArray (new const char*[params.size()]);
for ( size_t i=0; i<params.size(); i++ )
paramArray[i] = params[i].c_str();
- JSFunction * func = JS_CompileFunction( _context , assoc , fname.str().c_str() , params.size() , paramArray , code.c_str() , strlen( code.c_str() ) , "nofile_b" , 0 );
- delete paramArray;
+ JSFunction * func = JS_CompileFunction( _context , assoc , fname.str().c_str() , params.size() , paramArray.get() , code.c_str() , strlen( code.c_str() ) , "nofile_b" , 0 );
+
if ( ! func ){
cout << "compile failed for: " << raw << endl;
return 0;
@@ -444,13 +464,12 @@ namespace mongo {
static string ref = "$ref";
if ( ref == obj->firstElement().fieldName() ){
JSObject * o = JS_NewObject( _context , &dbref_class , NULL, NULL);
- assert( o );
- setProperty( o , "$ref" , toval( obj->firstElement() ) );
- setProperty( o , "$id" , toval( (*obj)["$id"] ) );
+ CHECKNEWOBJECT(o,_context,"toJSObject1");
+ assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
return o;
}
JSObject * o = JS_NewObject( _context , readOnly ? &bson_ro_class : &bson_class , NULL, NULL);
- assert( o );
+ CHECKNEWOBJECT(o,_context,"toJSObject2");
assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
return o;
}
@@ -469,7 +488,6 @@ namespace mongo {
return JSVAL_NULL;
case NumberDouble:
case NumberInt:
- case NumberLong:
return toval( e.number() );
case Symbol: // TODO: should we make a special class for this
case String:
@@ -505,6 +523,7 @@ namespace mongo {
case jstOID:{
OID oid = e.__oid();
JSObject * o = JS_NewObject( _context , &object_id_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"jstOID");
setProperty( o , "str" , toval( oid.str().c_str() ) );
return OBJECT_TO_JSVAL( o );
}
@@ -553,16 +572,31 @@ namespace mongo {
case Timestamp: {
JSObject * o = JS_NewObject( _context , &timestamp_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"Timestamp1");
setProperty( o , "t" , toval( (double)(e.timestampTime()) ) );
setProperty( o , "i" , toval( (double)(e.timestampInc()) ) );
return OBJECT_TO_JSVAL( o );
}
-
+ case NumberLong: {
+ boost::uint64_t val = (boost::uint64_t)e.numberLong();
+ JSObject * o = JS_NewObject( _context , &numberlong_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"NumberLong1");
+ setProperty( o , "floatApprox" , toval( (double)(boost::int64_t)( val ) ) );
+ if ( (boost::int64_t)val != (boost::int64_t)(double)(boost::int64_t)( val ) ) {
+ // using 2 doubles here instead of a single double because certain double
+ // bit patterns represent undefined values and sm might trash them
+ setProperty( o , "top" , toval( (double)(boost::uint32_t)( val >> 32 ) ) );
+ setProperty( o , "bottom" , toval( (double)(boost::uint32_t)( val & 0x00000000ffffffff ) ) );
+ }
+ return OBJECT_TO_JSVAL( o );
+ }
case DBRef: {
JSObject * o = JS_NewObject( _context , &dbpointer_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"DBRef1");
setProperty( o , "ns" , toval( e.dbrefNS() ) );
JSObject * oid = JS_NewObject( _context , &object_id_class , 0 , 0 );
+ CHECKNEWOBJECT(oid,_context,"DBRef2");
setProperty( oid , "str" , toval( e.dbrefOID().str().c_str() ) );
setProperty( o , "id" , OBJECT_TO_JSVAL( oid ) );
@@ -570,9 +604,10 @@ namespace mongo {
}
case BinData:{
JSObject * o = JS_NewObject( _context , &bindata_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"Bindata_BinData1");
int len;
- void * data = (void*)e.binData( len );
- assert( JS_SetPrivate( _context , o , data ) );
+ const char * data = e.binData( len );
+ assert( JS_SetPrivate( _context , o , new BinDataHolder( data ) ) );
setProperty( o , "len" , toval( len ) );
setProperty( o , "type" , toval( (int)e.binDataType() ) );
@@ -753,6 +788,8 @@ namespace mongo {
JSBool mark_modified( JSContext *cx, JSObject *obj, jsval idval, jsval *vp){
Convertor c(cx);
BSONHolder * holder = GETHOLDER( cx , obj );
+ if ( !holder ) // needed when we're messing with DBRef.prototype
+ return JS_TRUE;
if ( holder->_inResolve )
return JS_TRUE;
holder->_modified = true;
@@ -812,7 +849,15 @@ namespace mongo {
a = args.obj();
}
- BSONObj out = func( a );
+
+ BSONObj out;
+ try {
+ out = func( a );
+ }
+ catch ( std::exception& e ){
+ JS_ReportError( cx , e.what() );
+ return JS_FALSE;
+ }
if ( out.isEmpty() ){
*rval = JSVAL_VOID;
@@ -1346,6 +1391,7 @@ namespace mongo {
}
virtual void gc(){
+ smlock;
JS_GC( _context );
}
@@ -1382,7 +1428,7 @@ namespace mongo {
stringstream ss;
ss << "JS Error: " << message;
- if ( report ){
+ if ( report && report->filename ){
ss << " " << report->filename << ":" << report->lineno;
}
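
[editor's note] The NumberLong encoding above always stores floatApprox, and
adds top/bottom 32-bit halves (stored as doubles) only when the double
approximation is lossy; toNumberLongUnsafe reverses it. A self-contained
sketch of the round-trip using plain integer types (2^53 + 1 is the smallest
positive integer a double cannot represent exactly):

    #include <cassert>
    #include <stdint.h>

    static bool needsSplit( uint64_t val ){
        // true when a pass through double loses bits
        return (int64_t)val != (int64_t)(double)(int64_t)val;
    }

    static uint64_t reassemble( double top , double bottom ){
        return ( (uint64_t)(uint32_t)top << 32 ) + (uint32_t)bottom;
    }

    int main(){
        uint64_t v = 9007199254740993ULL;   // 2^53 + 1
        assert( needsSplit( v ) );
        double top = (double)(uint32_t)( v >> 32 );
        double bottom = (double)(uint32_t)( v & 0x00000000ffffffffULL );
        assert( reassemble( top , bottom ) == v );
        return 0;
    }
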
diff --git a/scripting/engine_spidermonkey.h b/scripting/engine_spidermonkey.h
index 8aeb56c..a39d8fb 100644
--- a/scripting/engine_spidermonkey.h
+++ b/scripting/engine_spidermonkey.h
@@ -93,6 +93,7 @@ namespace mongo {
extern JSClass dbref_class;
extern JSClass bindata_class;
extern JSClass timestamp_class;
+ extern JSClass numberlong_class;
extern JSClass minkey_class;
extern JSClass maxkey_class;
@@ -112,5 +113,22 @@ namespace mongo {
#define JSVAL_IS_OID(v) ( JSVAL_IS_OBJECT( v ) && JS_InstanceOf( cx , JSVAL_TO_OBJECT( v ) , &object_id_class , 0 ) )
bool isDate( JSContext * cx , JSObject * o );
-
+
+ // JS private data must be 2-byte aligned, so we use a holder to refer to an unaligned pointer.
+ struct BinDataHolder {
+ BinDataHolder( const char *c, int copyLen = -1 ) :
+ c_( const_cast< char * >( c ) ),
+ iFree_( copyLen != -1 ) {
+ if ( copyLen != -1 ) {
+ c_ = (char*)malloc( copyLen );
+ memcpy( c_, c, copyLen );
+ }
+ }
+ ~BinDataHolder() {
+ if ( iFree_ )
+ free( c_ );
+ }
+ char *c_;
+ bool iFree_;
+ };
}
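
[editor's note] BinDataHolder has two modes that are easy to miss: with the
default copyLen == -1 it merely aliases the caller's buffer, while an explicit
length makes it take (and later free) a malloc'd copy. A two-line usage
sketch:

    const char raw[] = { 0x01, 0x02, 0x03 };
    BinDataHolder alias( raw );        // non-owning: raw must outlive alias
    BinDataHolder owned( raw , 3 );    // owning copy, freed in the destructor
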
diff --git a/scripting/sm_db.cpp b/scripting/sm_db.cpp
index 72d8638..1c15170 100644
--- a/scripting/sm_db.cpp
+++ b/scripting/sm_db.cpp
@@ -18,6 +18,7 @@
// hacked in right now from engine_spidermonkey.cpp
#include "../client/syncclusterconnection.h"
+#include "../util/base64.h"
namespace mongo {
@@ -101,7 +102,6 @@ namespace mongo {
*rval = c.toval( &n );
return JS_TRUE;
}
-
JSFunctionSpec internal_cursor_functions[] = {
{ "hasNext" , internal_cursor_hasNext , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
@@ -144,42 +144,36 @@ namespace mongo {
string host = "127.0.0.1";
if ( argc > 0 )
host = c.toString( argv[0] );
+
+ int numCommas = DBClientBase::countCommas( host );
shared_ptr< DBClientWithCommands > conn;
string errmsg;
- if ( host.find( "," ) == string::npos ){
+ if ( numCommas == 0 ){
DBClientConnection * c = new DBClientConnection( true );
conn.reset( c );
if ( ! c->connect( host , errmsg ) ){
JS_ReportError( cx , ((string)"couldn't connect: " + errmsg).c_str() );
return JS_FALSE;
}
+ ScriptEngine::runConnectCallback( *c );
}
- else { // paired
- int numCommas = 0;
- for ( uint i=0; i<host.size(); i++ )
- if ( host[i] == ',' )
- numCommas++;
-
- assert( numCommas > 0 );
-
- if ( numCommas == 1 ){
- DBClientPaired * c = new DBClientPaired();
- conn.reset( c );
- if ( ! c->connect( host ) ){
- JS_ReportError( cx , "couldn't connect to pair" );
+ else if ( numCommas == 1 ){ // paired
+ DBClientPaired * c = new DBClientPaired();
+ conn.reset( c );
+ if ( ! c->connect( host ) ){
+ JS_ReportError( cx , "couldn't connect to pair" );
return JS_FALSE;
- }
- }
- else if ( numCommas == 2 ){
- conn.reset( new SyncCluterConnection( host ) );
- }
- else {
- JS_ReportError( cx , "1 (paired) or 2(quorum) commas are allowed" );
- return JS_FALSE;
}
}
+ else if ( numCommas == 2 ){
+ conn.reset( new SyncClusterConnection( host ) );
+ }
+ else {
+ JS_ReportError( cx , "1 (paired) or 2(quorum) commas are allowed" );
+ return JS_FALSE;
+ }
assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( conn ) ) ) );
@@ -211,7 +205,7 @@ namespace mongo {
};
JSBool mongo_find(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- uassert( 10240 , "mongo_find neesd 5 args" , argc == 5 );
+ uassert( 10240 , "mongo_find needs 6 args" , argc == 6 );
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
uassert( 10241 , "no connection!" , connHolder && connHolder->get() );
DBClientWithCommands *conn = connHolder->get();
@@ -226,15 +220,18 @@ namespace mongo {
int nToReturn = (int) c.toNumber( argv[3] );
int nToSkip = (int) c.toNumber( argv[4] );
bool slaveOk = c.getBoolean( obj , "slaveOk" );
+ int batchSize = (int) c.toNumber( argv[5] );
try {
- auto_ptr<DBClientCursor> cursor = conn->query( ns , q , nToReturn , nToSkip , f.nFields() ? &f : 0 , slaveOk ? QueryOption_SlaveOk : 0 );
+ auto_ptr<DBClientCursor> cursor = conn->query( ns , q , nToReturn , nToSkip , f.nFields() ? &f : 0 , slaveOk ? QueryOption_SlaveOk : 0 , batchSize );
if ( ! cursor.get() ){
+ log() << "query failed : " << ns << " " << q << " to: " << conn->toString() << endl;
JS_ReportError( cx , "error doing query: failed" );
return JS_FALSE;
}
JSObject * mycursor = JS_NewObject( cx , &internal_cursor_class , 0 , 0 );
+ CHECKNEWOBJECT( mycursor, cx, "internal_cursor_class" );
assert( JS_SetPrivate( cx , mycursor , new CursorHolder( cursor, *connHolder ) ) );
*rval = OBJECT_TO_JSVAL( mycursor );
return JS_TRUE;
@@ -412,6 +409,7 @@ namespace mongo {
assert( c.hasProperty( db , "_name" ) );
JSObject * coll = JS_NewObject( cx , &db_collection_class , 0 , 0 );
+ CHECKNEWOBJECT( coll, cx, "doCreateCollection" );
c.setProperty( coll , "_mongo" , c.getProperty( db , "_mongo" ) );
c.setProperty( coll , "_db" , OBJECT_TO_JSVAL( db ) );
c.setProperty( coll , "_shortName" , c.toval( shortName.c_str() ) );
@@ -499,7 +497,7 @@ namespace mongo {
if ( ! JS_InstanceOf( cx , obj , &object_id_class , 0 ) ){
obj = JS_NewObject( cx , &object_id_class , 0 , 0 );
- assert( obj );
+ CHECKNEWOBJECT( obj, cx, "object_id_constructor" );
*rval = OBJECT_TO_JSVAL( obj );
}
@@ -526,6 +524,7 @@ namespace mongo {
{ 0 }
};
+
// dbpointer
JSBool dbpointer_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
@@ -562,46 +561,82 @@ namespace mongo {
JSBool dbref_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
Convertor c( cx );
-
+
if ( argc == 2 ){
- assert( JS_SetProperty( cx , obj , "$ref" , &(argv[0]) ) );
- assert( JS_SetProperty( cx , obj , "$id" , &(argv[1]) ) );
+ JSObject * o = JS_NewObject( cx , NULL , NULL, NULL );
+ CHECKNEWOBJECT( o, cx, "dbref_constructor" );
+ assert( JS_SetProperty( cx, o , "$ref" , &argv[ 0 ] ) );
+ assert( JS_SetProperty( cx, o , "$id" , &argv[ 1 ] ) );
+ BSONObj bo = c.toObject( o );
+ assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( bo.getOwned() ) ) ) );
return JS_TRUE;
}
else {
JS_ReportError( cx , "DBRef needs 2 arguments" );
+ assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( BSONObj().getOwned() ) ) ) );
return JS_FALSE;
}
}
- JSClass dbref_class = {
- "DBRef" , JSCLASS_HAS_PRIVATE ,
- JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
- JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
- JSCLASS_NO_OPTIONAL_MEMBERS
- };
-
- JSFunctionSpec dbref_functions[] = {
- { 0 }
- };
-
+ JSClass dbref_class = bson_class; // name will be fixed later
// BinData
JSBool bindata_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- JS_ReportError( cx , "can't create a BinData yet" );
- return JS_FALSE;
+ Convertor c( cx );
+
+ if ( argc == 2 ){
+
+ int type = (int)c.toNumber( argv[ 0 ] );
+ string encoded = c.toString( argv[ 1 ] );
+ string decoded = base64::decode( encoded );
+
+ assert( JS_SetPrivate( cx, obj, new BinDataHolder( decoded.data(), decoded.length() ) ) );
+ c.setProperty( obj, "len", c.toval( decoded.length() ) );
+ c.setProperty( obj, "type", c.toval( type ) );
+
+ return JS_TRUE;
+ }
+ else {
+ JS_ReportError( cx , "BinData needs 2 arguments" );
+ return JS_FALSE;
+ }
}
+ JSBool bindata_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ Convertor c(cx);
+ int type = (int)c.getNumber( obj , "type" );
+ int len = (int)c.getNumber( obj, "len" );
+ void *holder = JS_GetPrivate( cx, obj );
+ assert( holder );
+ const char *data = ( ( BinDataHolder* )( holder ) )->c_;
+ stringstream ss;
+ ss << "BinData( type: " << type << ", base64: \"";
+ base64::encode( ss, (const char *)data, len );
+ ss << "\" )";
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ void bindata_finalize( JSContext * cx , JSObject * obj ){
+ Convertor c(cx);
+ void *holder = JS_GetPrivate( cx, obj );
+ if ( holder ){
+ delete ( BinDataHolder* )holder;
+ assert( JS_SetPrivate( cx , obj , 0 ) );
+ }
+ }
+
JSClass bindata_class = {
"BinData" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
- JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, bindata_finalize,
JSCLASS_NO_OPTIONAL_MEMBERS
};
JSFunctionSpec bindata_functions[] = {
+ { "toString" , bindata_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
{ 0 }
};
@@ -618,7 +653,7 @@ namespace mongo {
}
JSObject * array = JS_NewObject( cx , 0 , 0 , 0 );
- assert( array );
+ CHECKNEWOBJECT( array, cx, "map_constructor" );
jsval a = OBJECT_TO_JSVAL( array );
JS_SetProperty( cx , obj , "_data" , &a );
@@ -656,7 +691,38 @@ namespace mongo {
JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
JSCLASS_NO_OPTIONAL_MEMBERS
};
+
+ JSClass numberlong_class = {
+ "NumberLong" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool numberlong_valueof(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ Convertor c(cx);
+ return *rval = c.toval( double( c.toNumberLongUnsafe( obj ) ) );
+ }
+
+ JSBool numberlong_tonumber(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ return numberlong_valueof( cx, obj, argc, argv, rval );
+ }
+
+ JSBool numberlong_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ Convertor c(cx);
+ stringstream ss;
+ ss << c.toNumberLongUnsafe( obj );
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+ JSFunctionSpec numberlong_functions[] = {
+ { "valueOf" , numberlong_valueof , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toNumber" , numberlong_tonumber , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toString" , numberlong_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
JSClass minkey_class = {
"MinKey" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -684,8 +750,11 @@ namespace mongo {
if ( argc > 4 && JSVAL_IS_OBJECT( argv[4] ) )
c.setProperty( obj , "_query" , argv[4] );
- else
- c.setProperty( obj , "_query" , OBJECT_TO_JSVAL( JS_NewObject( cx , 0 , 0 , 0 ) ) );
+ else {
+ JSObject * temp = JS_NewObject( cx , 0 , 0 , 0 );
+ CHECKNEWOBJECT( temp, cx, "dbquery_constructor" );
+ c.setProperty( obj , "_query" , OBJECT_TO_JSVAL( temp ) );
+ }
if ( argc > 5 && JSVAL_IS_OBJECT( argv[5] ) )
c.setProperty( obj , "_fields" , argv[5] );
@@ -702,6 +771,11 @@ namespace mongo {
c.setProperty( obj , "_skip" , argv[7] );
else
c.setProperty( obj , "_skip" , JSVAL_ZERO );
+
+ if ( argc > 8 && JSVAL_IS_NUMBER( argv[8] ) )
+ c.setProperty( obj , "_batchSize" , argv[8] );
+ else
+ c.setProperty( obj , "_batchSize" , JSVAL_ZERO );
c.setProperty( obj , "_cursor" , JSVAL_NULL );
c.setProperty( obj , "_numReturned" , JSVAL_ZERO );
@@ -744,10 +818,10 @@ namespace mongo {
assert( JS_InitClass( cx , global , 0 , &internal_cursor_class , internal_cursor_constructor , 0 , 0 , internal_cursor_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &dbquery_class , dbquery_constructor , 0 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &dbpointer_class , dbpointer_constructor , 0 , 0 , dbpointer_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &dbref_class , dbref_constructor , 0 , 0 , dbref_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &bindata_class , bindata_constructor , 0 , 0 , bindata_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &timestamp_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &numberlong_class , 0 , 0 , 0 , numberlong_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &minkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &maxkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
@@ -756,6 +830,10 @@ namespace mongo {
assert( JS_InitClass( cx , global , 0 , &bson_ro_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &bson_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
+ static const char *dbrefName = "DBRef";
+ dbref_class.name = dbrefName;
+ assert( JS_InitClass( cx , global , 0 , &dbref_class , dbref_constructor , 2 , 0 , bson_functions , 0 , 0 ) );
+
scope->exec( jsconcatcode );
}
@@ -783,15 +861,22 @@ namespace mongo {
return true;
}
+ if ( JS_InstanceOf( c->_context , o , &numberlong_class , 0 ) ){
+ b.append( name.c_str() , c->toNumberLongUnsafe( o ) );
+ return true;
+ }
+
if ( JS_InstanceOf( c->_context , o , &dbpointer_class , 0 ) ){
b.appendDBRef( name.c_str() , c->getString( o , "ns" ).c_str() , c->toOID( c->getProperty( o , "id" ) ) );
return true;
}
if ( JS_InstanceOf( c->_context , o , &bindata_class , 0 ) ){
+ void *holder = JS_GetPrivate( c->_context , o );
+ const char *data = ( ( BinDataHolder * )( holder ) )->c_;
b.appendBinData( name.c_str() ,
(int)(c->getNumber( o , "len" )) , (BinDataType)((char)(c->getNumber( o , "type" ) ) ) ,
- (char*)JS_GetPrivate( c->_context , o ) + 1
+ data
);
return true;
}
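
[editor's note] BinData can now be constructed from (type, base64 string) and
printed back out. A sketch of the round-trip, assuming exactly the base64
helpers the patch calls: base64::decode(string) returning a string, and
base64::encode(stream, const char*, int):

    #include <sstream>
    #include <string>
    #include "util/base64.h"
    using namespace std;
    using namespace mongo;

    string binDataString( int type , const string& encoded ){
        string decoded = base64::decode( encoded );   // constructor side
        stringstream ss;                              // toString side
        ss << "BinData( type: " << type << ", base64: \"";
        base64::encode( ss , decoded.data() , (int)decoded.length() );
        ss << "\" )";
        return ss.str();
    }
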
diff --git a/scripting/utils.cpp b/scripting/utils.cpp
new file mode 100644
index 0000000..21089ac
--- /dev/null
+++ b/scripting/utils.cpp
@@ -0,0 +1,52 @@
+// utils.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "stdafx.h"
+#include "engine.h"
+#include "../util/md5.hpp"
+
+namespace mongo {
+
+ BSONObj jsmd5( const BSONObj &a ){
+ uassert( 10261 , "js md5 needs a string" , a.firstElement().type() == String );
+ const char * s = a.firstElement().valuestrsafe();
+
+ md5digest d;
+ md5_state_t st;
+ md5_init(&st);
+ md5_append( &st , (const md5_byte_t*)s , strlen( s ) );
+ md5_finish(&st, d);
+
+ return BSON( "" << digestToString( d ) );
+ }
+
+ BSONObj JSVersion( const BSONObj& args ){
+ cout << "version: " << versionString << endl;
+ if ( strstr( versionString , "+" ) )
+ printGitVersion();
+ return BSONObj();
+ }
+
+ void installGlobalUtils( Scope& scope ){
+ scope.injectNative( "hex_md5" , jsmd5 );
+ scope.injectNative( "version" , JSVersion );
+ }
+
+}
+
+
diff --git a/scripting/v8_db.cpp b/scripting/v8_db.cpp
index 6d859d4..4d14a03 100644
--- a/scripting/v8_db.cpp
+++ b/scripting/v8_db.cpp
@@ -19,7 +19,8 @@
#include "v8_utils.h"
#include "v8_db.h"
#include "engine.h"
-
+#include "util/base64.h"
+#include "../client/syncclusterconnection.h"
#include <iostream>
using namespace std;
@@ -49,6 +50,26 @@ namespace mongo {
return mongo;
}
+ v8::Handle<v8::FunctionTemplate> getNumberLongFunctionTemplate() {
+ v8::Local<v8::FunctionTemplate> numberLong = FunctionTemplate::New( numberLongInit );
+ v8::Local<v8::Template> proto = numberLong->PrototypeTemplate();
+
+ proto->Set( v8::String::New( "valueOf" ) , FunctionTemplate::New( numberLongValueOf ) );
+ proto->Set( v8::String::New( "toNumber" ) , FunctionTemplate::New( numberLongToNumber ) );
+ proto->Set( v8::String::New( "toString" ) , FunctionTemplate::New( numberLongToString ) );
+
+ return numberLong;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getBinDataFunctionTemplate() {
+ v8::Local<v8::FunctionTemplate> binData = FunctionTemplate::New( binDataInit );
+ v8::Local<v8::Template> proto = binData->PrototypeTemplate();
+
+ proto->Set( v8::String::New( "toString" ) , FunctionTemplate::New( binDataToString ) );
+
+ return binData;
+ }
+
void installDBTypes( Handle<ObjectTemplate>& global ){
v8::Local<v8::FunctionTemplate> db = FunctionTemplate::New( dbInit );
db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
@@ -69,7 +90,9 @@ namespace mongo {
global->Set( v8::String::New("DBPointer") , FunctionTemplate::New( dbPointerInit ) );
- global->Set( v8::String::New("BinData") , FunctionTemplate::New( binDataInit ) );
+ global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate() );
+
+ global->Set( v8::String::New("NumberLong") , getNumberLongFunctionTemplate() );
}
@@ -93,7 +116,9 @@ namespace mongo {
global->Set( v8::String::New("DBPointer") , FunctionTemplate::New( dbPointerInit )->GetFunction() );
- global->Set( v8::String::New("BinData") , FunctionTemplate::New( binDataInit )->GetFunction() );
+ global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate()->GetFunction() );
+
+ global->Set( v8::String::New("NumberLong") , getNumberLongFunctionTemplate()->GetFunction() );
BSONObjBuilder b;
b.appendMaxKey( "" );
@@ -107,7 +132,8 @@ namespace mongo {
}
void destroyConnection( Persistent<Value> object, void* parameter){
- cout << "Yo ho ho" << endl;
+ // TODO
+ cout << "warning: destroyConnection not implemented" << endl;
}
Handle<Value> mongoConsExternal(const Arguments& args){
@@ -122,16 +148,45 @@ namespace mongo {
strcpy( host , "127.0.0.1" );
}
- DBClientConnection * conn = new DBClientConnection( true );
-
+ DBClientWithCommands * conn = 0;
+ int commas = 0;
+ for ( int i=0; i<255; i++ ){
+ if ( host[i] == ',' )
+ commas++;
+ else if ( host[i] == 0 )
+ break;
+ }
+
+ if ( commas == 0 ){
+ DBClientConnection * c = new DBClientConnection( true );
+ string errmsg;
+ if ( ! c->connect( host , errmsg ) ){
+ delete c;
+ string x = "couldn't connect: ";
+ x += errmsg;
+ return v8::ThrowException( v8::String::New( x.c_str() ) );
+ }
+ conn = c;
+ }
+ else if ( commas == 1 ){
+ DBClientPaired * c = new DBClientPaired();
+ if ( ! c->connect( host ) ){
+ delete c;
+ return v8::ThrowException( v8::String::New( "couldn't connect to pair" ) );
+ }
+ conn = c;
+ }
+ else if ( commas == 2 ){
+ conn = new SyncClusterConnection( host );
+ }
+ else {
+ return v8::ThrowException( v8::String::New( "too many commas" ) );
+ }
+
Persistent<v8::Object> self = Persistent<v8::Object>::New( args.This() );
self.MakeWeak( conn , destroyConnection );
- string errmsg;
- if ( ! conn->connect( host , errmsg ) ){
- return v8::ThrowException( v8::String::New( "couldn't connect" ) );
- }
-
+ ScriptEngine::runConnectCallback( *conn );
// NOTE I don't believe the conn object will ever be freed.
args.This()->Set( CONN_STRING , External::New( conn ) );
args.This()->Set( v8::String::New( "slaveOk" ) , Boolean::New( false ) );
@@ -184,7 +239,7 @@ namespace mongo {
4 - skip
*/
Handle<Value> mongoFind(const Arguments& args){
- jsassert( args.Length() == 5 , "find needs 5 args" );
+ jsassert( args.Length() == 6 , "find needs 6 args" );
jsassert( args[1]->IsObject() , "needs to be an object" );
DBClientBase * conn = getConnection( args );
GETNS;
@@ -201,14 +256,15 @@ namespace mongo {
Local<v8::Value> slaveOkVal = mongo->Get( v8::String::New( "slaveOk" ) );
jsassert( slaveOkVal->IsBoolean(), "slaveOk member invalid" );
bool slaveOk = slaveOkVal->BooleanValue();
-
+
try {
auto_ptr<mongo::DBClientCursor> cursor;
int nToReturn = (int)(args[3]->ToNumber()->Value());
int nToSkip = (int)(args[4]->ToNumber()->Value());
+ int batchSize = (int)(args[5]->ToNumber()->Value());
{
v8::Unlocker u;
- cursor = conn->query( ns, q , nToReturn , nToSkip , haveFields ? &fields : 0, slaveOk ? QueryOption_SlaveOk : 0 );
+ cursor = conn->query( ns, q , nToReturn , nToSkip , haveFields ? &fields : 0, slaveOk ? QueryOption_SlaveOk : 0 , batchSize );
}
v8::Function * cons = (v8::Function*)( *( mongo->Get( v8::String::New( "internalCursor" ) ) ) );
assert( cons );
@@ -399,6 +455,11 @@ namespace mongo {
t->Set( v8::String::New( "_skip" ) , args[7] );
else
t->Set( v8::String::New( "_skip" ) , Number::New( 0 ) );
+
+ if ( args.Length() > 8 && args[8]->IsNumber() )
+ t->Set( v8::String::New( "_batchSize" ) , args[7] );
+ else
+ t->Set( v8::String::New( "_batchSize" ) , Number::New( 0 ) );
t->Set( v8::String::New( "_cursor" ) , v8::Null() );
t->Set( v8::String::New( "_numReturned" ) , v8::Number::New(0) );
@@ -473,7 +534,7 @@ namespace mongo {
v8::Handle<v8::Value> dbRefInit( const v8::Arguments& args ) {
- if (args.Length() != 2) {
+ if (args.Length() != 2 && args.Length() != 0) {
return v8::ThrowException( v8::String::New( "DBRef needs 2 arguments" ) );
}
@@ -484,8 +545,10 @@ namespace mongo {
it = f->NewInstance();
}
- it->Set( v8::String::New( "$ref" ) , args[0] );
- it->Set( v8::String::New( "$id" ) , args[1] );
+ if ( args.Length() == 2 ) {
+ it->Set( v8::String::New( "$ref" ) , args[0] );
+ it->Set( v8::String::New( "$id" ) , args[1] );
+ }
return it;
}
@@ -511,25 +574,126 @@ namespace mongo {
}
v8::Handle<v8::Value> binDataInit( const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+
+ // 3 args: len, type, data
+ if (args.Length() == 3) {
- if (args.Length() != 3) {
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+ v8::Function* f = getNamedCons( "BinData" );
+ it = f->NewInstance();
+ }
+
+ it->Set( v8::String::New( "len" ) , args[0] );
+ it->Set( v8::String::New( "type" ) , args[1] );
+ it->Set( v8::String::New( "data" ), args[2] );
+ it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
+
+ // 2 args: type, base64 string
+ } else if ( args.Length() == 2 ) {
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+ v8::Function* f = getNamedCons( "BinData" );
+ it = f->NewInstance();
+ }
+
+ v8::String::Utf8Value data( args[ 1 ] );
+ string decoded = base64::decode( *data );
+ it->Set( v8::String::New( "len" ) , v8::Number::New( decoded.length() ) );
+ it->Set( v8::String::New( "type" ) , args[ 0 ] );
+ it->Set( v8::String::New( "data" ), v8::String::New( decoded.data(), decoded.length() ) );
+ it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
+
+ } else {
return v8::ThrowException( v8::String::New( "BinData needs 3 arguments" ) );
}
+
+ return it;
+ }
+
+ v8::Handle<v8::Value> binDataToString( const v8::Arguments& args ) {
+
+ if (args.Length() != 0) {
+ return v8::ThrowException( v8::String::New( "toString needs 0 arguments" ) );
+ }
+
+ v8::Handle<v8::Object> it = args.This();
+ int len = it->Get( v8::String::New( "len" ) )->ToInt32()->Value();
+ int type = it->Get( v8::String::New( "type" ) )->ToInt32()->Value();
+ v8::String::Utf8Value data( it->Get( v8::String::New( "data" ) ) );
+
+ stringstream ss;
+ ss << "BinData( type: " << type << ", base64: \"";
+ base64::encode( ss, *data, len );
+ ss << "\" )";
+ string ret = ss.str();
+ return v8::String::New( ret.c_str() );
+ }
+
+ v8::Handle<v8::Value> numberLongInit( const v8::Arguments& args ) {
+
+ if (args.Length() != 1 && args.Length() != 3) {
+ return v8::ThrowException( v8::String::New( "NumberLong needs 1 or 3 arguments" ) );
+ }
v8::Handle<v8::Object> it = args.This();
if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
- v8::Function* f = getNamedCons( "BinData" );
+ v8::Function* f = getNamedCons( "NumberLong" );
it = f->NewInstance();
}
- it->Set( v8::String::New( "len" ) , args[0] );
- it->Set( v8::String::New( "type" ) , args[1] );
- it->Set( v8::String::New( "data" ), args[2] );
- it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
+ it->Set( v8::String::New( "floatApprox" ) , args[0] );
+ if ( args.Length() == 3 ) {
+ it->Set( v8::String::New( "top" ) , args[1] );
+ it->Set( v8::String::New( "bottom" ) , args[2] );
+ }
+ it->SetHiddenValue( v8::String::New( "__NumberLong" ), v8::Number::New( 1 ) );
return it;
}
+
+ long long numberLongVal( const v8::Handle< v8::Object > &it ) {
+ if ( !it->Has( v8::String::New( "top" ) ) )
+ return (long long)( it->Get( v8::String::New( "floatApprox" ) )->NumberValue() );
+ return
+ (long long)
+ ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
+ }
+
+ v8::Handle<v8::Value> numberLongValueOf( const v8::Arguments& args ) {
+
+ if (args.Length() != 0) {
+ return v8::ThrowException( v8::String::New( "toNumber needs 0 arguments" ) );
+ }
+
+ v8::Handle<v8::Object> it = args.This();
+
+ long long val = numberLongVal( it );
+
+ return v8::Number::New( double( val ) );
+ }
+
+ v8::Handle<v8::Value> numberLongToNumber( const v8::Arguments& args ) {
+ return numberLongValueOf( args );
+ }
+
+ v8::Handle<v8::Value> numberLongToString( const v8::Arguments& args ) {
+
+ if (args.Length() != 0) {
+ return v8::ThrowException( v8::String::New( "toString needs 0 arguments" ) );
+ }
+
+ v8::Handle<v8::Object> it = args.This();
+
+ long long val = numberLongVal( it );
+
+ stringstream ss;
+ ss << val;
+ string ret = ss.str();
+ return v8::String::New( ret.c_str() );
+ }
v8::Handle<v8::Value> bsonsize( const v8::Arguments& args ) {
diff --git a/scripting/v8_db.h b/scripting/v8_db.h
index c3f2ef1..92e2ae2 100644
--- a/scripting/v8_db.h
+++ b/scripting/v8_db.h
@@ -60,6 +60,12 @@ namespace mongo {
v8::Handle<v8::Value> dbPointerInit( const v8::Arguments& args );
v8::Handle<v8::Value> binDataInit( const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToString( const v8::Arguments& args );
+
+ v8::Handle<v8::Value> numberLongInit( const v8::Arguments& args );
+ v8::Handle<v8::Value> numberLongToNumber(const v8::Arguments& args);
+ v8::Handle<v8::Value> numberLongValueOf(const v8::Arguments& args);
+ v8::Handle<v8::Value> numberLongToString(const v8::Arguments& args);
v8::Handle<v8::Value> dbQueryInit( const v8::Arguments& args );
v8::Handle<v8::Value> dbQueryIndexAccess( uint32_t index , const v8::AccessorInfo& info );
diff --git a/scripting/v8_wrapper.cpp b/scripting/v8_wrapper.cpp
index 29a70ba..c4e6b7d 100644
--- a/scripting/v8_wrapper.cpp
+++ b/scripting/v8_wrapper.cpp
@@ -67,16 +67,15 @@ namespace mongo {
Local<v8::Object> mongoToV8( const BSONObj& m , bool array, bool readOnly ){
+ Local<v8::Object> o;
+
// handle DBRef. needs to come first, doesn't it? (metagoto)
static string ref = "$ref";
if ( ref == m.firstElement().fieldName() ) {
const BSONElement& id = m["$id"];
if (!id.eoo()) { // there's no check on $id existence in sm implementation. risky?
v8::Function* dbRef = getNamedCons( "DBRef" );
- v8::Handle<v8::Value> argv[2];
- argv[0] = mongoToV8Element(m.firstElement());
- argv[1] = mongoToV8Element(m["$id"]);
- return dbRef->NewInstance(2, argv);
+ o = dbRef->NewInstance();
}
}
@@ -85,9 +84,11 @@ namespace mongo {
Local< v8::ObjectTemplate > internalFieldObjects = v8::ObjectTemplate::New();
internalFieldObjects->SetInternalFieldCount( 1 );
- Local<v8::Object> o;
- if ( array ) {
- // NOTE Looks like it's impossible to add interceptors to non array objects in v8.
+ if ( !o.IsEmpty() ) {
+ readOnly = false;
+ } else if ( array ) {
+ // NOTE Looks like it's impossible to add interceptors to v8 arrays.
+ readOnly = false;
o = v8::Array::New();
} else if ( !readOnly ) {
o = v8::Object::New();
@@ -149,7 +150,6 @@ namespace mongo {
case mongo::NumberDouble:
case mongo::NumberInt:
- case mongo::NumberLong: // may lose information here - just copying sm engine behavior
o->Set( v8::String::New( f.fieldName() ) , v8::Number::New( f.number() ) );
break;
@@ -168,6 +168,7 @@ namespace mongo {
break;
case mongo::jstNULL:
+ case mongo::Undefined: // duplicate sm behavior
o->Set( v8::String::New( f.fieldName() ) , v8::Null() );
break;
@@ -200,7 +201,7 @@ namespace mongo {
case mongo::Timestamp: {
Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
- sub->Set( v8::String::New( "time" ) , v8::Date::New( f.timestampTime() ) );
+ sub->Set( v8::String::New( "t" ) , v8::Number::New( f.timestampTime() ) );
sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
@@ -208,6 +209,24 @@ namespace mongo {
break;
}
+ case mongo::NumberLong: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+ unsigned long long val = f.numberLong();
+ v8::Function* numberLong = getNamedCons( "NumberLong" );
+ if ( (long long)val == (long long)(double)(long long)(val) ) {
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ o->Set( v8::String::New( f.fieldName() ), numberLong->NewInstance( 1, argv ) );
+ } else {
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( (double)(long long)(val) );
+ argv[1] = v8::Integer::New( val >> 32 );
+ argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
+ o->Set( v8::String::New( f.fieldName() ), numberLong->NewInstance(3, argv) );
+ }
+ break;
+ }
+
case mongo::MinKey: {
Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
sub->Set( v8::String::New( "$MinKey" ), v8::Boolean::New( true ) );
@@ -224,10 +243,6 @@ namespace mongo {
break;
}
- case mongo::Undefined:
- o->Set( v8::String::New( f.fieldName() ), v8::Undefined() );
- break;
-
case mongo::DBRef: {
v8::Function* dbPointer = getNamedCons( "DBPointer" );
v8::Handle<v8::Value> argv[2];
@@ -247,7 +262,7 @@ namespace mongo {
}
- if ( !array && readOnly ) {
+ if ( readOnly ) {
readOnlyObjects->SetNamedPropertyHandler( 0, NamedReadOnlySet, 0, NamedReadOnlyDelete );
readOnlyObjects->SetIndexedPropertyHandler( 0, IndexedReadOnlySet, 0, IndexedReadOnlyDelete );
}
@@ -291,6 +306,7 @@ namespace mongo {
case mongo::EOO:
case mongo::jstNULL:
+ case mongo::Undefined: // duplicate sm behavior
return v8::Null();
case mongo::RegEx: {
@@ -319,12 +335,29 @@ namespace mongo {
case mongo::Timestamp: {
Local<v8::Object> sub = internalFieldObjects->NewInstance();
- sub->Set( v8::String::New( "time" ) , v8::Date::New( f.timestampTime() ) );
+ sub->Set( v8::String::New( "t" ) , v8::Number::New( f.timestampTime() ) );
sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
return sub;
}
+
+ case mongo::NumberLong: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+ unsigned long long val = f.numberLong();
+ v8::Function* numberLong = getNamedCons( "NumberLong" );
+ if ( (long long)val == (long long)(double)(long long)(val) ) {
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ return numberLong->NewInstance( 1, argv );
+ } else {
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ argv[1] = v8::Integer::New( val >> 32 );
+ argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
+ return numberLong->NewInstance( 3, argv );
+ }
+ }
case mongo::MinKey: {
Local<v8::Object> sub = internalFieldObjects->NewInstance();
@@ -340,9 +373,6 @@ namespace mongo {
return sub;
}
- case mongo::Undefined:
- return v8::Undefined();
-
case mongo::DBRef: {
v8::Function* dbPointer = getNamedCons( "DBPointer" );
v8::Handle<v8::Value> argv[2];
@@ -362,7 +392,7 @@ namespace mongo {
return v8::Undefined();
}
- void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){
+ void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value , int depth ){
if ( value->IsString() ){
b.append( sname.c_str() , toSTLString( value ).c_str() );
@@ -383,7 +413,7 @@ namespace mongo {
}
if ( value->IsArray() ){
- BSONObj sub = v8ToMongo( value->ToObject() );
+ BSONObj sub = v8ToMongo( value->ToObject() , depth );
b.appendArray( sname.c_str() , sub );
return;
}
@@ -405,7 +435,7 @@ namespace mongo {
switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
case Timestamp:
b.appendTimestamp( sname.c_str(),
- Date_t( v8::Date::Cast( *obj->Get( v8::String::New( "time" ) ) )->NumberValue() ),
+ Date_t( obj->Get( v8::String::New( "t" ) )->ToNumber()->Value() ),
obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
return;
case MinKey:
@@ -421,8 +451,8 @@ namespace mongo {
string s = toSTLString( value );
if ( s.size() && s[0] == '/' ){
s = s.substr( 1 );
- string r = s.substr( 0 , s.find( "/" ) );
- string o = s.substr( s.find( "/" ) + 1 );
+ string r = s.substr( 0 , s.rfind( "/" ) );
+ string o = s.substr( s.rfind( "/" ) + 1 );
b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
}
else if ( value->ToObject()->GetPrototype()->IsObject() &&
@@ -431,10 +461,23 @@ namespace mongo {
oid.init( toSTLString( value ) );
b.appendOID( sname.c_str() , &oid );
}
- else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
+ else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__NumberLong" ) ).IsEmpty() ) {
// TODO might be nice to potentially speed this up with an indexed internal
// field, but I don't yet know how to use an ObjectTemplate with a
// constructor.
+ v8::Handle< v8::Object > it = value->ToObject();
+ long long val;
+ if ( !it->Has( v8::String::New( "top" ) ) ) {
+ val = (long long)( it->Get( v8::String::New( "floatApprox" ) )->NumberValue() );
+ } else {
+ val = (long long)
+ ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
+ }
+
+ b.append( sname.c_str(), val );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
OID oid;
oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
@@ -450,7 +493,7 @@ namespace mongo {
mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
dataArray );
} else {
- BSONObj sub = v8ToMongo( value->ToObject() );
+ BSONObj sub = v8ToMongo( value->ToObject() , depth );
b.append( sname.c_str() , sub );
}
return;
@@ -474,12 +517,14 @@ namespace mongo {
cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
}
- BSONObj v8ToMongo( v8::Handle<v8::Object> o ){
+ BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth ){
BSONObjBuilder b;
-
- v8::Handle<v8::String> idName = v8::String::New( "_id" );
- if ( o->HasRealNamedProperty( idName ) ){
- v8ToMongoElement( b , idName , "_id" , o->Get( idName ) );
+
+ if ( depth == 0 ){
+ v8::Handle<v8::String> idName = v8::String::New( "_id" );
+ if ( o->HasRealNamedProperty( idName ) ){
+ v8ToMongoElement( b , idName , "_id" , o->Get( idName ) );
+ }
}
Local<v8::Array> names = o->GetPropertyNames();
@@ -493,10 +538,10 @@ namespace mongo {
v8::Local<v8::Value> value = o->Get( name );
const string sname = toSTLString( name );
- if ( sname == "_id" )
+ if ( depth == 0 && sname == "_id" )
continue;
- v8ToMongoElement( b , name , sname , value );
+ v8ToMongoElement( b , name , sname , value , depth + 1 );
}
return b.obj();
}
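
The NumberLong hunks above split a 64-bit value across the shell-visible fields floatApprox, top and bottom whenever the value cannot survive a round trip through a double; v8ToMongoElement then reassembles the exact value as ((unsigned long long)top << 32) + (unsigned)bottom, falling back to truncating floatApprox when top is absent. A minimal shell-side sketch of the two constructor shapes the patch emits (the three-argument form is assumed to populate floatApprox/top/bottom, matching the fields read back above; the literals are illustrative):

    // fits exactly in a double: one-argument form, floatApprox only
    var small = new NumberLong( 5 );
    // 2^53 + 1 is not exactly representable as a double, so the exact
    // 32-bit halves ride along: floatApprox (approximate), top, bottom
    var big = new NumberLong( 9007199254740992, 2097152, 1 );

The Timestamp hunks make the same round trip symmetric: the field is now written and read as a plain Number named "t" instead of a Date named "time".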
diff --git a/scripting/v8_wrapper.h b/scripting/v8_wrapper.h
index 1d67cf1..838aaf4 100644
--- a/scripting/v8_wrapper.h
+++ b/scripting/v8_wrapper.h
@@ -26,10 +26,10 @@
namespace mongo {
v8::Local<v8::Object> mongoToV8( const mongo::BSONObj & m , bool array = 0 , bool readOnly = false );
- mongo::BSONObj v8ToMongo( v8::Handle<v8::Object> o );
+ mongo::BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth = 0 );
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
- const string sname , v8::Handle<v8::Value> value );
+ const string sname , v8::Handle<v8::Value> value , int depth = 0 );
v8::Handle<v8::Value> mongoToV8Element( const BSONElement &f );
v8::Function * getNamedCons( const char * name );
diff --git a/shell/collection.js b/shell/collection.js
index d228ba7..edb07ae 100644
--- a/shell/collection.js
+++ b/shell/collection.js
@@ -26,15 +26,16 @@ DBCollection.prototype.getName = function(){
return this._shortName;
}
-DBCollection.prototype.help = function(){
+DBCollection.prototype.help = function() {
print("DBCollection help");
print("\tdb.foo.count()");
print("\tdb.foo.dataSize()");
- print("\tdb.foo.distinct( key ) - eg. db.foo.distinct( 'x' )" );
+ print("\tdb.foo.distinct( key ) - eg. db.foo.distinct( 'x' )");
print("\tdb.foo.drop() drop the collection");
print("\tdb.foo.dropIndex(name)");
print("\tdb.foo.dropIndexes()");
print("\tdb.foo.ensureIndex(keypattern,options) - options should be an object with these possible fields: name, unique, dropDups");
+ print("\tdb.foo.reIndex()");
print("\tdb.foo.find( [query] , [fields]) - first parameter is an optional query filter. second parameter is optional set of fields to return.");
print("\t e.g. db.foo.find( { x : 77 } , { name : 1 , x : 1 } )");
print("\tdb.foo.find(...).count()");
@@ -46,9 +47,10 @@ DBCollection.prototype.help = function(){
print("\tdb.foo.getDB() get DB object associated with collection");
print("\tdb.foo.getIndexes()");
print("\tdb.foo.group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )");
- print("\tdb.foo.mapReduce( mapFunction , reduceFunction , <optional params> )" );
- print("\tdb.foo.remove(query)" );
+ print("\tdb.foo.mapReduce( mapFunction , reduceFunction , <optional params> )");
+ print("\tdb.foo.remove(query)");
print("\tdb.foo.renameCollection( newName , <dropTarget> ) renames the collection.");
+ print("\tdb.foo.runCommand( name , <options> ) runs a db command with the given name where the 1st param is the colleciton name" );
print("\tdb.foo.save(obj)");
print("\tdb.foo.stats()");
print("\tdb.foo.storageSize() - includes free space allocated to this collection");
@@ -56,7 +58,7 @@ DBCollection.prototype.help = function(){
print("\tdb.foo.totalSize() - storage allocated for all data and indexes");
print("\tdb.foo.update(query, object[, upsert_bool, multi_bool])");
print("\tdb.foo.validate() - SLOW");
- print("\tdb.foo.getShardVersion() - only for use with sharding" );
+ print("\tdb.foo.getShardVersion() - only for use with sharding");
}
DBCollection.prototype.getFullName = function(){
@@ -66,10 +68,19 @@ DBCollection.prototype.getDB = function(){
return this._db;
}
-DBCollection.prototype._dbCommand = function( cmd ){
- return this._db._dbCommand( cmd );
+DBCollection.prototype._dbCommand = function( cmd , params ){
+ if ( typeof( cmd ) == "object" )
+ return this._db._dbCommand( cmd );
+
+ var c = {};
+ c[cmd] = this.getName();
+ if ( params )
+ Object.extend( c , params );
+ return this._db._dbCommand( c );
}
+DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;
+
DBCollection.prototype._massageObject = function( q ){
if ( ! q )
return {};
@@ -128,7 +139,7 @@ DBCollection.prototype.find = function( query , fields , limit , skip ){
}
DBCollection.prototype.findOne = function( query , fields ){
- var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 );
+ var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 , 0 );
if ( ! cursor.hasNext() )
return null;
var ret = cursor.next();
@@ -144,7 +155,15 @@ DBCollection.prototype.insert = function( obj , _allow_dot ){
if ( ! _allow_dot ) {
this._validateForStorage( obj );
}
- return this._mongo.insert( this._fullName , obj );
+ if ( typeof( obj._id ) == "undefined" ){
+ var tmp = obj; // don't want to modify input
+ obj = {_id: new ObjectId()};
+ for (var key in tmp){
+ obj[key] = tmp[key];
+ }
+ }
+ this._mongo.insert( this._fullName , obj );
+ this._lastID = obj._id;
}
DBCollection.prototype.remove = function( t ){
@@ -275,12 +294,8 @@ DBCollection.prototype.resetIndexCache = function(){
this._indexCache = {};
}
-DBCollection.prototype.reIndex = function(){
- var specs = this.getIndexSpecs();
- this.dropIndexes();
- for ( var i = 0; i < specs.length; ++i ){
- this.ensureIndex( specs[i].key, [ specs[i].unique, specs[i].name ] );
- }
+DBCollection.prototype.reIndex = function() {
+ return this._db.runCommand({ reIndex: this.getName() });
}
DBCollection.prototype.dropIndexes = function(){
@@ -311,7 +326,7 @@ DBCollection.prototype.drop = function(){
DBCollection.prototype.findAndModify = function(args){
var cmd = { findandmodify: this.getName() };
- for (key in args){
+ for (var key in args){
cmd[key] = args[key];
}
@@ -402,7 +417,7 @@ DBCollection.prototype.dropIndex = function(index) {
if ( ! isString( index ) && isObject( index ) )
index = this._genIndexName( index );
- var res = this._dbCommand( { deleteIndexes: this.getName(), index: index } );
+ var res = this._dbCommand( "deleteIndexes" ,{ index: index } );
this.resetIndexCache();
return res;
}
@@ -431,8 +446,8 @@ DBCollection.prototype.getCollection = function( subName ){
return this._db.getCollection( this._shortName + "." + subName );
}
-DBCollection.prototype.stats = function(){
- return this._db.runCommand( { collstats : this._shortName } );
+DBCollection.prototype.stats = function( scale ){
+ return this._db.runCommand( { collstats : this._shortName , scale : scale } );
}
DBCollection.prototype.dataSize = function(){
@@ -444,20 +459,13 @@ DBCollection.prototype.storageSize = function(){
}
DBCollection.prototype.totalIndexSize = function( verbose ){
- var total = 0;
- var mydb = this._db;
- var shortName = this._shortName;
- this.getIndexes().forEach(
- function( spec ){
- var coll = mydb.getCollection( shortName + ".$" + spec.name );
- var mysize = coll.dataSize();
- total += coll.dataSize();
- if ( verbose ) {
- print( coll + "\t" + mysize );
- }
+ var stats = this.stats();
+ if (verbose){
+ for (var ns in stats.indexSizes){
+ print( ns + "\t" + stats.indexSizes[ns] );
}
- );
- return total;
+ }
+ return stats.totalIndexSize;
}
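
With the string form of _dbCommand (and its runCommand alias), helpers can build a command whose first key carries the collection name. A hedged sketch of the resulting command objects (collection and index names are examples):

    db.foo.runCommand( "deleteIndexes" , { index : "x_1" } ); // { deleteIndexes : "foo", index : "x_1" }
    db.foo.stats( 1024 );            // { collstats : "foo", scale : 1024 }
    db.foo.totalIndexSize( true );   // prints each stats().indexSizes entry, returns stats().totalIndexSize
    db.foo.insert( { x : 1 } );      // an _id : ObjectId(...) is now prepended client-side if missing
    db.foo._lastID;                  // the _id assigned by the last insert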
diff --git a/shell/db.js b/shell/db.js
index ab79e22..bdb1153 100644
--- a/shell/db.js
+++ b/shell/db.js
@@ -20,6 +20,10 @@ DB.prototype.getName = function(){
return this._name;
}
+DB.prototype.stats = function(){
+ return this.runCommand( { dbstats : 1 } );
+}
+
DB.prototype.getCollection = function( name ){
return new DBCollection( this._mongo , this , name , this._name + "." + name );
}
@@ -48,10 +52,12 @@ DB.prototype._adminCommand = function( obj ){
return this.getSisterDB( "admin" ).runCommand( obj );
}
-DB.prototype.addUser = function( username , pass ){
+DB.prototype.addUser = function( username , pass, readOnly ){
+ readOnly = readOnly || false;
var c = this.getCollection( "system.users" );
var u = c.findOne( { user : username } ) || { user : username };
+ u.readOnly = readOnly;
u.pwd = hex_md5( username + ":mongo:" + pass );
print( tojson( u ) );
@@ -62,6 +68,10 @@ DB.prototype.removeUser = function( username ){
this.getCollection( "system.users" ).remove( { user : username } );
}
+DB.prototype.__pwHash = function( nonce, username, pass ) {
+ return hex_md5( nonce + username + hex_md5( username + ":mongo:" + pass ) );
+}
+
DB.prototype.auth = function( username , pass ){
var n = this.runCommand( { getnonce : 1 } );
@@ -70,7 +80,7 @@ DB.prototype.auth = function( username , pass ){
authenticate : 1 ,
user : username ,
nonce : n.nonce ,
- key : hex_md5( n.nonce + username + hex_md5( username + ":mongo:" + pass ) )
+ key : this.__pwHash( n.nonce, username, pass )
}
);
@@ -219,12 +229,16 @@ DB.prototype.cloneCollection = function(from, collection, query) {
* @return Object returned has member ok set to true if operation succeeds, false otherwise.
* See also: db.clone()
*/
-DB.prototype.copyDatabase = function(fromdb, todb, fromhost) {
+DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {
assert( isString(fromdb) && fromdb.length );
assert( isString(todb) && todb.length );
fromhost = fromhost || "";
- //this.resetIndexCache();
- return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );
+ if ( username && password ) {
+ var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );
+ return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );
+ } else {
+ return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );
+ }
}
/**
@@ -239,7 +253,7 @@ DB.prototype.repairDatabase = function() {
DB.prototype.help = function() {
print("DB methods:");
- print("\tdb.addUser(username, password)");
+ print("\tdb.addUser(username, password[, readOnly=false])");
print("\tdb.auth(username, password)");
print("\tdb.cloneDatabase(fromhost)");
print("\tdb.commandHelp(name) returns the help for the command");
@@ -268,8 +282,10 @@ DB.prototype.help = function() {
print("\tdb.repairDatabase()");
print("\tdb.resetError()");
print("\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }");
+ print("\tdb.serverStatus()");
print("\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all");
print("\tdb.shutdownServer()");
+ print("\tdb.stats()");
print("\tdb.version() current version of the server" );
}
@@ -490,7 +506,7 @@ DB.prototype.getCollectionNames = function(){
function(z){
var name = z.name;
- if ( name.indexOf( "$" ) >= 0 )
+ if ( name.indexOf( "$" ) >= 0 && name != "local.oplog.$main" )
return;
all.push( name.substring( nsLength ) );
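
The db.js additions route auth() and the authenticated copyDatabase() path through one hashing helper. A short sketch of the new surface (user name and password are placeholders; n comes from { getnonce : 1 } as in auth() above):

    db.stats();                            // runs { dbstats : 1 }
    db.addUser( "reader" , "pw" , true );  // saves { user : "reader", readOnly : true, pwd : hex_md5( "reader:mongo:pw" ) }
    var key = db.__pwHash( n.nonce , "reader" , "pw" ); // hex_md5( nonce + user + hex_md5( user + ":mongo:" + pass ) )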
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 7984383..cad3698 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -1,4 +1,20 @@
// dbshell.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include <stdio.h>
@@ -51,7 +67,8 @@ void shellHistoryAdd( const char * line ){
if ( strlen(line) == 0 )
return;
#ifdef USE_READLINE
- add_history( line );
+ if ((strstr(line, ".auth")) == NULL)
+ add_history( line );
#endif
}
@@ -62,6 +79,21 @@ void intr( int sig ){
}
#if !defined(_WIN32)
+void killOps() {
+ if ( mongo::shellUtils::_nokillop || mongo::shellUtils::_allMyUris.size() == 0 )
+ return;
+ vector< string > uris;
+ for( map< const void*, string >::iterator i = mongo::shellUtils::_allMyUris.begin(); i != mongo::shellUtils::_allMyUris.end(); ++i )
+ uris.push_back( i->second );
+ mongo::BSONObj spec = BSON( "" << uris );
+ try {
+ auto_ptr< mongo::Scope > scope( mongo::globalScriptEngine->newScope() );
+ scope->invoke( "function( x ) { killWithUris( x ); }", spec );
+ } catch ( ... ) {
+ mongo::rawOut( "exception while cleaning up any db ops started by this shell\n" );
+ }
+}
+
void quitNicely( int sig ){
if ( sig == SIGINT && inMultiLine ){
gotInterrupted = 1;
@@ -69,6 +101,7 @@ void quitNicely( int sig ){
}
if ( sig == SIGPIPE )
mongo::rawOut( "mongo got signal SIGPIPE\n" );
+ killOps();
shellHistoryDone();
exit(0);
}
@@ -93,7 +126,7 @@ char * shellReadline( const char * prompt , int handlesigint = 0 ){
signal( SIGINT , quitNicely );
return ret;
#else
- printf( prompt );
+ printf("%s", prompt);
char * buf = new char[1024];
char * l = fgets( buf , 1024 , stdin );
int len = strlen( buf );
@@ -289,6 +322,7 @@ int _main(int argc, char* argv[]) {
hidden_options.add_options()
("dbaddress", po::value<string>(), "dbaddress")
("files", po::value< vector<string> >(), "files")
+ ("nokillop", "nokillop") // for testing, kill op will also be disabled automatically if the tests starts a mongo program
;
positional_options.add("dbaddress", 1);
@@ -336,7 +370,10 @@ int _main(int argc, char* argv[]) {
if (params.count("quiet")) {
mongo::cmdLine.quiet = true;
}
-
+ if (params.count("nokillop")) {
+ mongo::shellUtils::_nokillop = true;
+ }
+
/* This is a bit confusing, here are the rules:
*
* if nodb is set then all positional parameters are files
@@ -382,6 +419,7 @@ int _main(int argc, char* argv[]) {
}
+ mongo::ScriptEngine::setConnectCallback( mongo::shellUtils::onConnect );
mongo::ScriptEngine::setup();
mongo::globalScriptEngine->setScopeInitCallback( mongo::shellUtils::initScope );
auto_ptr< mongo::Scope > scope( mongo::globalScriptEngine->newScope() );
@@ -433,7 +471,7 @@ int _main(int argc, char* argv[]) {
}
string code = line;
- if ( code == "exit" ){
+ if ( code == "exit" || code == "exit;" ){
break;
}
if ( code.size() == 0 )
@@ -455,9 +493,15 @@ int _main(int argc, char* argv[]) {
cmd = cmd.substr( 0 , cmd.find( " " ) );
if ( cmd.find( "\"" ) == string::npos ){
- scope->exec( (string)"__iscmd__ = shellHelper[\"" + cmd + "\"];" , "(shellhelp1)" , false , true , true );
- if ( scope->getBoolean( "__iscmd__" ) ){
- scope->exec( (string)"shellHelper( \"" + cmd + "\" , \"" + code.substr( cmd.size() ) + "\");" , "(shellhelp2)" , false , true , false );
+ try {
+ scope->exec( (string)"__iscmd__ = shellHelper[\"" + cmd + "\"];" , "(shellhelp1)" , false , true , true );
+ if ( scope->getBoolean( "__iscmd__" ) ){
+ scope->exec( (string)"shellHelper( \"" + cmd + "\" , \"" + code.substr( cmd.size() ) + "\");" , "(shellhelp2)" , false , true , false );
+ wascmd = true;
+ }
+ }
+ catch ( std::exception& e ){
+ cout << "error2:" << e.what() << endl;
wascmd = true;
}
}
@@ -485,6 +529,7 @@ int _main(int argc, char* argv[]) {
}
int main(int argc, char* argv[]) {
+ static mongo::StaticObserver staticObserver;
try {
return _main( argc , argv );
}
@@ -494,10 +539,4 @@ int main(int argc, char* argv[]) {
}
}
-namespace mongo {
- DBClientBase * createDirectClient(){
- uassert( 10256 , "no createDirectClient in shell" , 0 );
- return 0;
- }
-}
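
Two of the dbshell.cpp changes are easiest to see as a hypothetical session (prompts illustrative): lines containing ".auth" are kept out of readline history, and "exit;" now quits like "exit" does:

    > db.auth( "reader" , "pw" )   // not added to history
    > exit;                        // accepted alongside "exit"

When the shell quits via quitNicely(), it also runs killOps(), handing every URI it recorded in shellUtils::_allMyUris to the killWithUris helper (defined in the shell JS further down) unless --nokillop was passed.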
diff --git a/shell/mongo.js b/shell/mongo.js
index dc7fcd9..acd028b 100644
--- a/shell/mongo.js
+++ b/shell/mongo.js
@@ -34,7 +34,7 @@ Mongo.prototype.getDB = function( name ){
Mongo.prototype.getDBs = function(){
var res = this.getDB( "admin" ).runCommand( { "listDatabases" : 1 } );
- assert( res.ok == 1 , "listDatabases failed" );
+ assert( res.ok == 1 , "listDatabases failed:" + tojson( res ) );
return res;
}
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index d0f1b48..f88b3c2 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -1,446 +1,616 @@
const char * jsconcatcode =
-"if ( ( typeof DBCollection ) == \"undefined\" ){\n"
- "DBCollection = function( mongo , db , shortName , fullName ){\n"
- "this._mongo = mongo;\n"
- "this._db = db;\n"
- "this._shortName = shortName;\n"
- "this._fullName = fullName;\n"
- "this.verify();\n"
- "}\n"
- "}\n"
- "DBCollection.prototype.verify = function(){\n"
- "assert( this._fullName , \"no fullName\" );\n"
- "assert( this._shortName , \"no shortName\" );\n"
- "assert( this._db , \"no db\" );\n"
- "assert.eq( this._fullName , this._db._name + \".\" + this._shortName , \"name mismatch\" );\n"
- "assert( this._mongo , \"no mongo in DBCollection\" );\n"
- "}\n"
- "DBCollection.prototype.getName = function(){\n"
- "return this._shortName;\n"
- "}\n"
- "DBCollection.prototype.help = function(){\n"
- "print(\"DBCollection help\");\n"
- "print(\"\\tdb.foo.getDB() get DB object associated with collection\");\n"
- "print(\"\\tdb.foo.findOne([query])\");\n"
- "print(\"\\tdb.foo.find( [query] , [fields]) - first parameter is an optional query filter. second parameter is optional set of fields to return.\");\n"
- "print(\"\\t e.g. db.foo.find( { x : 77 } , { name : 1 , x : 1 } )\");\n"
- "print(\"\\tdb.foo.find(...).sort(...)\");\n"
- "print(\"\\tdb.foo.find(...).limit(n)\");\n"
- "print(\"\\tdb.foo.find(...).skip(n)\");\n"
- "print(\"\\tdb.foo.find(...).count()\");\n"
- "print(\"\\tdb.foo.count()\");\n"
- "print(\"\\tdb.foo.group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )\");\n"
- "print(\"\\tdb.foo.save(obj)\");\n"
- "print(\"\\tdb.foo.update(query, object[, upsert_bool])\");\n"
- "print(\"\\tdb.foo.remove(query)\" );\n"
- "print(\"\\tdb.foo.ensureIndex(keypattern,options) - options should be an object with these possible fields: name, unique, dropDups\");\n"
- "print(\"\\tdb.foo.dropIndexes()\");\n"
- "print(\"\\tdb.foo.dropIndex(name)\");\n"
- "print(\"\\tdb.foo.getIndexes()\");\n"
- "print(\"\\tdb.foo.drop() drop the collection\");\n"
- "print(\"\\tdb.foo.renameCollection( newName ) renames the collection\");\n"
- "print(\"\\tdb.foo.validate() - SLOW\");\n"
- "print(\"\\tdb.foo.stats()\");\n"
- "print(\"\\tdb.foo.dataSize()\");\n"
- "print(\"\\tdb.foo.storageSize() - includes free space allocated to this collection\");\n"
- "print(\"\\tdb.foo.totalIndexSize() - size in bytes of all the indexes\");\n"
- "print(\"\\tdb.foo.totalSize() - storage allocated for all data and indexes\");\n"
- "}\n"
- "DBCollection.prototype.getFullName = function(){\n"
- "return this._fullName;\n"
- "}\n"
- "DBCollection.prototype.getDB = function(){\n"
- "return this._db;\n"
- "}\n"
- "DBCollection.prototype._dbCommand = function( cmd ){\n"
- "return this._db._dbCommand( cmd );\n"
- "}\n"
- "DBCollection.prototype._massageObject = function( q ){\n"
- "if ( ! q )\n"
- "return {};\n"
- "var type = typeof q;\n"
- "if ( type == \"function\" )\n"
- "return { $where : q };\n"
- "if ( q.isObjectId )\n"
- "return { _id : q };\n"
- "if ( type == \"object\" )\n"
- "return q;\n"
- "if ( type == \"string\" ){\n"
- "if ( q.length == 24 )\n"
- "return { _id : q };\n"
- "return { $where : q };\n"
- "}\n"
- "throw \"don't know how to massage : \" + type;\n"
- "}\n"
- "DBCollection.prototype._validateObject = function( o ){\n"
- "if ( o._ensureSpecial && o._checkModify )\n"
- "throw \"can't save a DBQuery object\";\n"
- "}\n"
- "DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
- "DBCollection.prototype._validateForStorage = function( o ){\n"
- "this._validateObject( o );\n"
- "for ( var k in o ){\n"
- "if ( k.indexOf( \".\" ) >= 0 ) {\n"
- "throw \"can't have . in field names [\" + k + \"]\" ;\n"
- "}\n"
- "if ( k.indexOf( \"$\" ) == 0 && ! DBCollection._allowedFields[k] ) {\n"
- "throw \"field names cannot start with $ [\" + k + \"]\";\n"
- "}\n"
- "if ( o[k] !== null && typeof( o[k] ) === \"object\" ) {\n"
- "this._validateForStorage( o[k] );\n"
- "}\n"
- "}\n"
- "};\n"
- "DBCollection.prototype.find = function( query , fields , limit , skip ){\n"
- "return new DBQuery( this._mongo , this._db , this ,\n"
- "this._fullName , this._massageObject( query ) , fields , limit , skip );\n"
- "}\n"
- "DBCollection.prototype.findOne = function( query , fields ){\n"
- "var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 );\n"
- "if ( ! cursor.hasNext() )\n"
+"__quiet = false;\n"
+ "chatty = function(s){\n"
+ "if ( ! __quiet )\n"
+ "print( s );}\n"
+ "friendlyEqual = function( a , b ){\n"
+ "if ( a == b )\n"
+ "return true;\n"
+ "if ( tojson( a ) == tojson( b ) )\n"
+ "return true;\n"
+ "return false;}\n"
+ "doassert = function( msg ){\n"
+ "print( \"assert: \" + msg );\n"
+ "throw msg;}\n"
+ "assert = function( b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( b )\n"
+ "return;\n"
+ "doassert( \"assert failed : \" + msg );}\n"
+ "assert._debug = false;\n"
+ "assert.eq = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a == b )\n"
+ "return;\n"
+ "if ( ( a != null && b != null ) && friendlyEqual( a , b ) )\n"
+ "return;\n"
+ "doassert( \"[\" + tojson( a ) + \"] != [\" + tojson( b ) + \"] are not equal : \" + msg );}\n"
+ "assert.neq = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a != b )\n"
+ "return;\n"
+ "doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );}\n"
+ "assert.soon = function( f, msg, timeout, interval ) {\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "var start = new Date();\n"
+ "timeout = timeout || 30000;\n"
+ "interval = interval || 200;\n"
+ "var last;\n"
+ "while( 1 ) {\n"
+ "if ( typeof( f ) == \"string\" ){\n"
+ "if ( eval( f ) )\n"
+ "return;}\n"
+ "else {\n"
+ "if ( f() )\n"
+ "return;}\n"
+ "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
+ "doassert( \"assert.soon failed: \" + f + \", msg:\" + msg );\n"
+ "sleep( interval );}}\n"
+ "assert.throws = function( func , params , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "try {\n"
+ "func.apply( null , params );}\n"
+ "catch ( e ){\n"
+ "return e;}\n"
+ "doassert( \"did not throw exception: \" + msg );}\n"
+ "assert.commandWorked = function( res , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( res.ok == 1 )\n"
+ "return;\n"
+ "doassert( \"command failed: \" + tojson( res ) + \" : \" + msg );}\n"
+ "assert.commandFailed = function( res , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( res.ok == 0 )\n"
+ "return;\n"
+ "doassert( \"command worked when it should have failed: \" + tojson( res ) + \" : \" + msg );}\n"
+ "assert.isnull = function( what , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( what == null )\n"
+ "return;\n"
+ "doassert( \"supposed to null (\" + ( msg || \"\" ) + \") was: \" + tojson( what ) );}\n"
+ "assert.lt = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a < b )\n"
+ "return;\n"
+ "doassert( a + \" is not less than \" + b + \" : \" + msg );}\n"
+ "assert.gt = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a > b )\n"
+ "return;\n"
+ "doassert( a + \" is not greater than \" + b + \" : \" + msg );}\n"
+ "assert.close = function( a , b , msg ){\n"
+ "var diff = Math.abs( (a-b)/((a+b)/2) );\n"
+ "if ( diff < .001 )\n"
+ "return;\n"
+ "doassert( a + \" is not close to \" + b + \" diff: \" + diff + \" : \" + msg );}\n"
+ "Object.extend = function( dst , src , deep ){\n"
+ "for ( var k in src ){\n"
+ "var v = src[k];\n"
+ "if ( deep && typeof(v) == \"object\" ){\n"
+ "v = Object.extend( typeof ( v.length ) == \"number\" ? [] : {} , v , true );}\n"
+ "dst[k] = v;}\n"
+ "return dst;}\n"
+ "argumentsToArray = function( a ){\n"
+ "var arr = [];\n"
+ "for ( var i=0; i<a.length; i++ )\n"
+ "arr[i] = a[i];\n"
+ "return arr;}\n"
+ "isString = function( x ){\n"
+ "return typeof( x ) == \"string\";}\n"
+ "isNumber = function(x){\n"
+ "return typeof( x ) == \"number\";}\n"
+ "isObject = function( x ){\n"
+ "return typeof( x ) == \"object\";}\n"
+ "String.prototype.trim = function() {\n"
+ "return this.replace(/^\\s+|\\s+$/g,\"\");}\n"
+ "String.prototype.ltrim = function() {\n"
+ "return this.replace(/^\\s+/,\"\");}\n"
+ "String.prototype.rtrim = function() {\n"
+ "return this.replace(/\\s+$/,\"\");}\n"
+ "Date.timeFunc = function( theFunc , numTimes ){\n"
+ "var start = new Date();\n"
+ "numTimes = numTimes || 1;\n"
+ "for ( var i=0; i<numTimes; i++ ){\n"
+ "theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );}\n"
+ "return (new Date()).getTime() - start.getTime();}\n"
+ "Date.prototype.tojson = function(){\n"
+ "return \"\\\"\" + this.toString() + \"\\\"\";}\n"
+ "RegExp.prototype.tojson = RegExp.prototype.toString;\n"
+ "Array.contains = function( a , x ){\n"
+ "for ( var i=0; i<a.length; i++ ){\n"
+ "if ( a[i] == x )\n"
+ "return true;}\n"
+ "return false;}\n"
+ "Array.unique = function( a ){\n"
+ "var u = [];\n"
+ "for ( var i=0; i<a.length; i++){\n"
+ "var o = a[i];\n"
+ "if ( ! Array.contains( u , o ) ){\n"
+ "u.push( o );}}\n"
+ "return u;}\n"
+ "Array.shuffle = function( arr ){\n"
+ "for ( var i=0; i<arr.length-1; i++ ){\n"
+ "var pos = i+Random.randInt(arr.length-i);\n"
+ "var save = arr[i];\n"
+ "arr[i] = arr[pos];\n"
+ "arr[pos] = save;}\n"
+ "return arr;}\n"
+ "Array.tojson = function( a , indent ){\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "if (a.length == 0) {\n"
+ "return \"[ ]\";}\n"
+ "var s = \"[\\n\";\n"
+ "indent += \"\\t\";\n"
+ "for ( var i=0; i<a.length; i++){\n"
+ "s += indent + tojson( a[i], indent );\n"
+ "if ( i < a.length - 1 ){\n"
+ "s += \",\\n\";}}\n"
+ "if ( a.length == 0 ) {\n"
+ "s += indent;}\n"
+ "indent = indent.substring(1);\n"
+ "s += \"\\n\"+indent+\"]\";\n"
+ "return s;}\n"
+ "Array.fetchRefs = function( arr , coll ){\n"
+ "var n = [];\n"
+ "for ( var i=0; i<arr.length; i ++){\n"
+ "var z = arr[i];\n"
+ "if ( coll && coll != z.getCollection() )\n"
+ "continue;\n"
+ "n.push( z.fetch() );}\n"
+ "return n;}\n"
+ "Array.sum = function( arr ){\n"
+ "if ( arr.length == 0 )\n"
"return null;\n"
- "var ret = cursor.next();\n"
- "if ( cursor.hasNext() ) throw \"findOne has more than 1 result!\";\n"
- "if ( ret.$err )\n"
- "throw \"error \" + tojson( ret );\n"
- "return ret;\n"
- "}\n"
- "DBCollection.prototype.insert = function( obj , _allow_dot ){\n"
- "if ( ! obj )\n"
- "throw \"no object!\";\n"
- "if ( ! _allow_dot ) {\n"
- "this._validateForStorage( obj );\n"
- "}\n"
- "return this._mongo.insert( this._fullName , obj );\n"
- "}\n"
- "DBCollection.prototype.remove = function( t ){\n"
- "this._mongo.remove( this._fullName , this._massageObject( t ) );\n"
- "}\n"
- "DBCollection.prototype.update = function( query , obj , upsert ){\n"
- "assert( query , \"need a query\" );\n"
- "assert( obj , \"need an object\" );\n"
- "this._validateObject( obj );\n"
- "return this._mongo.update( this._fullName , query , obj , upsert ? true : false );\n"
- "}\n"
- "DBCollection.prototype.save = function( obj ){\n"
- "if ( obj == null || typeof( obj ) == \"undefined\" )\n"
- "throw \"can't save a null\";\n"
- "if ( typeof( obj._id ) == \"undefined\" ){\n"
- "obj._id = new ObjectId();\n"
- "return this.insert( obj );\n"
- "}\n"
+ "var s = arr[0];\n"
+ "for ( var i=1; i<arr.length; i++ )\n"
+ "s += arr[i];\n"
+ "return s;}\n"
+ "Array.avg = function( arr ){\n"
+ "if ( arr.length == 0 )\n"
+ "return null;\n"
+ "return Array.sum( arr ) / arr.length;}\n"
+ "Array.stdDev = function( arr ){\n"
+ "var avg = Array.avg( arr );\n"
+ "var sum = 0;\n"
+ "for ( var i=0; i<arr.length; i++ ){\n"
+ "sum += Math.pow( arr[i] - avg , 2 );}\n"
+ "return Math.sqrt( sum / arr.length );}\n"
+ "Object.keySet = function( o ) {\n"
+ "var ret = new Array();\n"
+ "for( i in o ) {\n"
+ "if ( !( i in o.__proto__ && o[ i ] === o.__proto__[ i ] ) ) {\n"
+ "ret.push( i );}}\n"
+ "return ret;}\n"
+ "if ( ! ObjectId.prototype )\n"
+ "ObjectId.prototype = {}\n"
+ "ObjectId.prototype.toString = function(){\n"
+ "return this.str;}\n"
+ "ObjectId.prototype.tojson = function(){\n"
+ "return \"ObjectId(\\\"\" + this.str + \"\\\")\";}\n"
+ "ObjectId.prototype.isObjectId = true;\n"
+ "if ( typeof( DBPointer ) != \"undefined\" ){\n"
+ "DBPointer.prototype.fetch = function(){\n"
+ "assert( this.ns , \"need a ns\" );\n"
+ "assert( this.id , \"need an id\" );\n"
+ "return db[ this.ns ].findOne( { _id : this.id } );}\n"
+ "DBPointer.prototype.tojson = function(indent){\n"
+ "return tojson({\"ns\" : this.ns, \"id\" : this.id}, indent);}\n"
+ "DBPointer.prototype.getCollection = function(){\n"
+ "return this.ns;}\n"
+ "DBPointer.prototype.toString = function(){\n"
+ "return \"DBPointer \" + this.ns + \":\" + this.id;}}\n"
"else {\n"
- "return this.update( { _id : obj._id } , obj , true );\n"
- "}\n"
- "}\n"
- "DBCollection.prototype._genIndexName = function( keys ){\n"
- "var name = \"\";\n"
- "for ( var k in keys ){\n"
- "if ( name.length > 0 )\n"
- "name += \"_\";\n"
- "name += k + \"_\";\n"
- "var v = keys[k];\n"
- "if ( typeof v == \"number\" )\n"
- "name += v;\n"
- "}\n"
- "return name;\n"
- "}\n"
- "DBCollection.prototype._indexSpec = function( keys, options ) {\n"
- "var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };\n"
- "if ( ! options ){\n"
- "}\n"
- "else if ( typeof ( options ) == \"string\" )\n"
- "ret.name = options;\n"
- "else if ( typeof ( options ) == \"boolean\" )\n"
- "ret.unique = true;\n"
- "else if ( typeof ( options ) == \"object\" ){\n"
- "if ( options.length ){\n"
- "var nb = 0;\n"
- "for ( var i=0; i<options.length; i++ ){\n"
- "if ( typeof ( options[i] ) == \"string\" )\n"
- "ret.name = options[i];\n"
- "else if ( typeof( options[i] ) == \"boolean\" ){\n"
- "if ( options[i] ){\n"
- "if ( nb == 0 )\n"
- "ret.unique = true;\n"
- "if ( nb == 1 )\n"
- "ret.dropDups = true;\n"
- "}\n"
- "nb++;\n"
- "}\n"
- "}\n"
- "}\n"
+ "print( \"warning: no DBPointer\" );}\n"
+ "if ( typeof( DBRef ) != \"undefined\" ){\n"
+ "DBRef.prototype.fetch = function(){\n"
+ "assert( this.$ref , \"need a ns\" );\n"
+ "assert( this.$id , \"need an id\" );\n"
+ "return db[ this.$ref ].findOne( { _id : this.$id } );}\n"
+ "DBRef.prototype.tojson = function(indent){\n"
+ "return tojson({\"$ref\" : this.$ref, \"$id\" : this.$id}, indent);}\n"
+ "DBRef.prototype.getCollection = function(){\n"
+ "return this.$ref;}\n"
+ "DBRef.prototype.toString = function(){\n"
+ "return this.tojson();}}\n"
"else {\n"
- "Object.extend( ret , options );\n"
- "}\n"
- "}\n"
+ "print( \"warning: no DBRef\" );}\n"
+ "if ( typeof( BinData ) != \"undefined\" ){\n"
+ "BinData.prototype.tojson = function(){\n"
+ "return \"BinData type: \" + this.type + \" len: \" + this.len;}}\n"
"else {\n"
- "throw \"can't handle: \" + typeof( options );\n"
- "}\n"
- "/*\n"
- "return ret;\n"
- "var name;\n"
- "var nTrue = 0;\n"
- "if ( ! isObject( options ) ) {\n"
- "options = [ options ];\n"
- "}\n"
- "if ( options.length ){\n"
- "for( var i = 0; i < options.length; ++i ) {\n"
- "var o = options[ i ];\n"
- "if ( isString( o ) ) {\n"
- "ret.name = o;\n"
- "} else if ( typeof( o ) == \"boolean\" ) {\n"
- "if ( o ) {\n"
- "++nTrue;\n"
- "}\n"
- "}\n"
- "}\n"
- "if ( nTrue > 0 ) {\n"
- "ret.unique = true;\n"
- "}\n"
- "if ( nTrue > 1 ) {\n"
- "ret.dropDups = true;\n"
- "}\n"
- "}\n"
- "*/\n"
- "return ret;\n"
- "}\n"
- "DBCollection.prototype.createIndex = function( keys , options ){\n"
- "var o = this._indexSpec( keys, options );\n"
- "this._db.getCollection( \"system.indexes\" ).insert( o , true );\n"
- "}\n"
- "DBCollection.prototype.ensureIndex = function( keys , options ){\n"
- "var name = this._indexSpec( keys, options ).name;\n"
- "this._indexCache = this._indexCache || {};\n"
- "if ( this._indexCache[ name ] ){\n"
- "return false;\n"
- "}\n"
- "this.createIndex( keys , options );\n"
- "if ( this.getDB().getLastError() == \"\" ) {\n"
- "this._indexCache[name] = true;\n"
- "}\n"
- "return true;\n"
- "}\n"
- "DBCollection.prototype.resetIndexCache = function(){\n"
- "this._indexCache = {};\n"
- "}\n"
- "DBCollection.prototype.reIndex = function(){\n"
- "var specs = this.getIndexSpecs();\n"
- "this.dropIndexes();\n"
- "for ( var i = 0; i < specs.length; ++i ){\n"
- "this.ensureIndex( specs[i].key, [ specs[i].unique, specs[i].name ] );\n"
- "}\n"
- "}\n"
- "DBCollection.prototype.dropIndexes = function(){\n"
- "this.resetIndexCache();\n"
- "var res = this._db.runCommand( { deleteIndexes: this.getName(), index: \"*\" } );\n"
- "assert( res , \"no result from dropIndex result\" );\n"
- "if ( res.ok )\n"
- "return res;\n"
- "if ( res.errmsg.match( /not found/ ) )\n"
- "return res;\n"
- "throw \"error dropping indexes : \" + tojson( res );\n"
- "}\n"
- "DBCollection.prototype.drop = function(){\n"
- "this.resetIndexCache();\n"
- "return this._db.runCommand( { drop: this.getName() } );\n"
- "}\n"
- "DBCollection.prototype.renameCollection = function( newName ){\n"
- "return this._db._adminCommand( { renameCollection : this._fullName , to : this._db._name + \".\" + newName } ).ok;\n"
- "}\n"
- "DBCollection.prototype.validate = function() {\n"
- "var res = this._db.runCommand( { validate: this.getName() } );\n"
- "res.valid = false;\n"
- "if ( res.result ){\n"
- "var str = \"-\" + tojson( res.result );\n"
- "res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );\n"
- "var p = /lastExtentSize:(\\d+)/;\n"
- "var r = p.exec( str );\n"
- "if ( r ){\n"
- "res.lastExtentSize = Number( r[1] );\n"
- "}\n"
- "}\n"
- "return res;\n"
- "}\n"
- "DBCollection.prototype.getIndexes = function(){\n"
- "return this.getDB().getCollection( \"system.indexes\" ).find( { ns : this.getFullName() } ).toArray();\n"
- "}\n"
- "DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;\n"
- "DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;\n"
- "DBCollection.prototype.getIndexKeys = function(){\n"
- "return this.getIndexes().map(\n"
- "function(i){\n"
- "return i.key;\n"
- "}\n"
+ "print( \"warning: no BinData\" );}\n"
+ "if ( typeof _threadInject != \"undefined\" ){\n"
+ "print( \"fork() available!\" );\n"
+ "Thread = function(){\n"
+ "this.init.apply( this, arguments );}\n"
+ "_threadInject( Thread.prototype );\n"
+ "ScopedThread = function() {\n"
+ "this.init.apply( this, arguments );}\n"
+ "ScopedThread.prototype = new Thread( function() {} );\n"
+ "_scopedThreadInject( ScopedThread.prototype );\n"
+ "fork = function() {\n"
+ "var t = new Thread( function() {} );\n"
+ "Thread.apply( t, arguments );\n"
+ "return t;}\n"
+ "EventGenerator = function( me, collectionName, mean ) {\n"
+ "this.mean = mean;\n"
+ "this.events = new Array( me, collectionName );}\n"
+ "EventGenerator.prototype._add = function( action ) {\n"
+ "this.events.push( [ Random.genExp( this.mean ), action ] );}\n"
+ "EventGenerator.prototype.addInsert = function( obj ) {\n"
+ "this._add( \"t.insert( \" + tojson( obj ) + \" )\" );}\n"
+ "EventGenerator.prototype.addRemove = function( obj ) {\n"
+ "this._add( \"t.remove( \" + tojson( obj ) + \" )\" );}\n"
+ "EventGenerator.prototype.addUpdate = function( objOld, objNew ) {\n"
+ "this._add( \"t.update( \" + tojson( objOld ) + \", \" + tojson( objNew ) + \" )\" );}\n"
+ "EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {\n"
+ "query = query || {};\n"
+ "shouldPrint = shouldPrint || false;\n"
+ "checkQuery = checkQuery || false;\n"
+ "var action = \"assert.eq( \" + count + \", t.count( \" + tojson( query ) + \" ) );\"\n"
+ "if ( checkQuery ) {\n"
+ "action += \" assert.eq( \" + count + \", t.find( \" + tojson( query ) + \" ).toArray().length );\"}\n"
+ "if ( shouldPrint ) {\n"
+ "action += \" print( me + ' ' + \" + count + \" );\";}\n"
+ "this._add( action );}\n"
+ "EventGenerator.prototype.getEvents = function() {\n"
+ "return this.events;}\n"
+ "EventGenerator.dispatch = function() {\n"
+ "var args = argumentsToArray( arguments );\n"
+ "var me = args.shift();\n"
+ "var collectionName = args.shift();\n"
+ "var m = new Mongo( db.getMongo().host );\n"
+ "var t = m.getDB( \"test\" )[ collectionName ];\n"
+ "for( var i in args ) {\n"
+ "sleep( args[ i ][ 0 ] );\n"
+ "eval( args[ i ][ 1 ] );}}\n"
+ "ParallelTester = function() {\n"
+ "this.params = new Array();}\n"
+ "ParallelTester.prototype.add = function( fun, args ) {\n"
+ "args = args || [];\n"
+ "args.unshift( fun );\n"
+ "this.params.push( args );}\n"
+ "ParallelTester.prototype.run = function( msg, newScopes ) {\n"
+ "newScopes = newScopes || false;\n"
+ "assert.parallelTests( this.params, msg, newScopes );}\n"
+ "ParallelTester.createJstestsLists = function( n ) {\n"
+ "var params = new Array();\n"
+ "for( var i = 0; i < n; ++i ) {\n"
+ "params.push( [] );}\n"
+ "var makeKeys = function( a ) {\n"
+ "var ret = {};\n"
+ "for( var i in a ) {\n"
+ "ret[ a[ i ] ] = 1;}\n"
+ "return ret;}\n"
+ "var skipTests = makeKeys( [ \"jstests/dbadmin.js\",\n"
+ "\"jstests/repair.js\",\n"
+ "\"jstests/cursor8.js\",\n"
+ "\"jstests/recstore.js\",\n"
+ "\"jstests/extent.js\",\n"
+ "\"jstests/indexb.js\",\n"
+ "\"jstests/profile1.js\",\n"
+ "\"jstests/mr3.js\",\n"
+ "\"jstests/apitest_db.js\"] );\n"
+ "var serialTestsArr = [ \"jstests/fsync.js\",\n"
+ "\"jstests/fsync2.js\" ];\n"
+ "var serialTests = makeKeys( serialTestsArr );\n"
+ "params[ 0 ] = serialTestsArr;\n"
+ "var files = listFiles(\"jstests\");\n"
+ "files = Array.shuffle( files );\n"
+ "var i = 0;\n"
+ "files.forEach(\n"
+ "function(x) {\n"
+ "if ( /_runner/.test(x.name) ||\n"
+ "/_lodeRunner/.test(x.name) ||\n"
+ "( x.name in skipTests ) ||\n"
+ "( x.name in serialTests ) ||\n"
+ "! /\\.js$/.test(x.name ) ){\n"
+ "print(\" >>>>>>>>>>>>>>> skipping \" + x.name);\n"
+ "return;}\n"
+ "params[ i % n ].push( x.name );\n"
+ "++i;}\n"
");\n"
- "}\n"
- "DBCollection.prototype.count = function( x ){\n"
- "return this.find( x ).count();\n"
- "}\n"
- "/**\n"
- "* Drop free lists. Normally not used.\n"
- "* Note this only does the collection itself, not the namespaces of its indexes (see cleanAll).\n"
- "*/\n"
- "DBCollection.prototype.clean = function() {\n"
- "return this._dbCommand( { clean: this.getName() } );\n"
- "}\n"
- "/**\n"
- "* <p>Drop a specified index.</p>\n"
- "*\n"
- "* <p>\n"
- "* Name is the name of the index in the system.indexes name field. (Run db.system.indexes.find() to\n"
- "* see example data.)\n"
- "* </p>\n"
- "*\n"
- "* <p>Note : alpha: space is not reclaimed </p>\n"
- "* @param {String} name of index to delete.\n"
- "* @return A result object. result.ok will be true if successful.\n"
- "*/\n"
- "DBCollection.prototype.dropIndex = function(index) {\n"
- "assert(index , \"need to specify index to dropIndex\" );\n"
- "if ( ! isString( index ) && isObject( index ) )\n"
- "index = this._genIndexName( index );\n"
- "var res = this._dbCommand( { deleteIndexes: this.getName(), index: index } );\n"
- "this.resetIndexCache();\n"
- "return res;\n"
- "}\n"
- "DBCollection.prototype.copyTo = function( newName ){\n"
- "return this.getDB().eval(\n"
- "function( collName , newName ){\n"
- "var from = db[collName];\n"
- "var to = db[newName];\n"
- "to.ensureIndex( { _id : 1 } );\n"
- "var count = 0;\n"
- "var cursor = from.find();\n"
- "while ( cursor.hasNext() ){\n"
- "var o = cursor.next();\n"
- "count++;\n"
- "to.save( o );\n"
- "}\n"
- "return count;\n"
- "} , this.getName() , newName\n"
+ "params[ 0 ] = Array.shuffle( params[ 0 ] );\n"
+ "for( var i in params ) {\n"
+ "params[ i ].unshift( i );}\n"
+ "return params;}\n"
+ "ParallelTester.fileTester = function() {\n"
+ "var args = argumentsToArray( arguments );\n"
+ "var suite = args.shift();\n"
+ "args.forEach(\n"
+ "function( x ) {\n"
+ "print(\" S\" + suite + \" Test : \" + x + \" ...\");\n"
+ "var time = Date.timeFunc( function() { load(x); }, 1);\n"
+ "print(\" S\" + suite + \" Test : \" + x + \" \" + time + \"ms\" );}\n"
+ ");}\n"
+ "assert.parallelTests = function( params, msg, newScopes ) {\n"
+ "newScopes = newScopes || false;\n"
+ "var wrapper = function( fun, argv ) {\n"
+ "eval (\n"
+ "\"var z = function() {\" +\n"
+ "\"var __parallelTests__fun = \" + fun.toString() + \";\" +\n"
+ "\"var __parallelTests__argv = \" + tojson( argv ) + \";\" +\n"
+ "\"var __parallelTests__passed = false;\" +\n"
+ "\"try {\" +\n"
+ "\"__parallelTests__fun.apply( 0, __parallelTests__argv );\" +\n"
+ "\"__parallelTests__passed = true;\" +\n"
+ "\"} catch ( e ) {\" +\n"
+ "\"print( e );\" +\n"
+ "\"}\" +\n"
+ "\"return __parallelTests__passed;\" +\n"
+ "\"}\"\n"
");\n"
- "}\n"
- "DBCollection.prototype.getCollection = function( subName ){\n"
- "return this._db.getCollection( this._shortName + \".\" + subName );\n"
- "}\n"
- "DBCollection.prototype.stats = function(){\n"
- "return this._db.runCommand( { collstats : this._shortName } );\n"
- "}\n"
- "DBCollection.prototype.dataSize = function(){\n"
- "return this.stats().size;\n"
- "}\n"
- "DBCollection.prototype.storageSize = function(){\n"
- "return this.stats().storageSize;\n"
- "}\n"
- "DBCollection.prototype.totalIndexSize = function( verbose ){\n"
+ "return z;}\n"
+ "var runners = new Array();\n"
+ "for( var i in params ) {\n"
+ "var param = params[ i ];\n"
+ "var test = param.shift();\n"
+ "var t;\n"
+ "if ( newScopes )\n"
+ "t = new ScopedThread( wrapper( test, param ) );\n"
+ "else\n"
+ "t = new Thread( wrapper( test, param ) );\n"
+ "runners.push( t );}\n"
+ "runners.forEach( function( x ) { x.start(); } );\n"
+ "var nFailed = 0;\n"
+ "runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );\n"
+ "assert.eq( 0, nFailed, msg );}}\n"
+ "tojson = function( x, indent , nolint ){\n"
+ "if ( x === null )\n"
+ "return \"null\";\n"
+ "if ( x === undefined )\n"
+ "return \"undefined\";\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "switch ( typeof x ){\n"
+ "case \"string\": {\n"
+ "var s = \"\\\"\";\n"
+ "for ( var i=0; i<x.length; i++ ){\n"
+ "if ( x[i] == '\"' ){\n"
+ "s += \"\\\\\\\"\";}\n"
+ "else\n"
+ "s += x[i];}\n"
+ "return s + \"\\\"\";}\n"
+ "case \"number\":\n"
+ "case \"boolean\":\n"
+ "return \"\" + x;\n"
+ "case \"object\":{\n"
+ "var s = tojsonObject( x, indent , nolint );\n"
+ "if ( ( nolint == null || nolint == true ) && s.length < 80 && ( indent == null || indent.length == 0 ) ){\n"
+ "s = s.replace( /[\\s\\r\\n ]+/gm , \" \" );}\n"
+ "return s;}\n"
+ "case \"function\":\n"
+ "return x.toString();\n"
+ "default:\n"
+ "throw \"tojson can't handle type \" + ( typeof x );}}\n"
+ "tojsonObject = function( x, indent , nolint ){\n"
+ "var lineEnding = nolint ? \" \" : \"\\n\";\n"
+ "var tabSpace = nolint ? \"\" : \"\\t\";\n"
+ "assert.eq( ( typeof x ) , \"object\" , \"tojsonObject needs object, not [\" + ( typeof x ) + \"]\" );\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "if ( typeof( x.tojson ) == \"function\" && x.tojson != tojson ) {\n"
+ "return x.tojson(indent,nolint);}\n"
+ "if ( typeof( x.constructor.tojson ) == \"function\" && x.constructor.tojson != tojson ) {\n"
+ "return x.constructor.tojson( x, indent , nolint );}\n"
+ "if ( x.toString() == \"[object MaxKey]\" )\n"
+ "return \"{ $maxKey : 1 }\";\n"
+ "if ( x.toString() == \"[object MinKey]\" )\n"
+ "return \"{ $minKey : 1 }\";\n"
+ "var s = \"{\" + lineEnding;\n"
+ "indent += tabSpace;\n"
"var total = 0;\n"
- "var mydb = this._db;\n"
- "var shortName = this._shortName;\n"
- "this.getIndexes().forEach(\n"
- "function( spec ){\n"
- "var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
- "var mysize = coll.dataSize();\n"
- "total += coll.dataSize();\n"
- "if ( verbose ) {\n"
- "print( coll + \"\\t\" + mysize );\n"
- "}\n"
- "}\n"
- ");\n"
- "return total;\n"
- "}\n"
- "DBCollection.prototype.totalSize = function(){\n"
- "var total = this.storageSize();\n"
- "var mydb = this._db;\n"
- "var shortName = this._shortName;\n"
- "this.getIndexes().forEach(\n"
- "function( spec ){\n"
- "var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
- "var mysize = coll.storageSize();\n"
- "total += coll.dataSize();\n"
- "}\n"
- ");\n"
- "return total;\n"
- "}\n"
- "DBCollection.prototype.convertToCapped = function( bytes ){\n"
- "if ( ! bytes )\n"
- "throw \"have to specify # of bytes\";\n"
- "return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )\n"
- "}\n"
- "DBCollection.prototype.exists = function(){\n"
- "return this._db.system.namespaces.findOne( { name : this._fullName } );\n"
- "}\n"
- "DBCollection.prototype.isCapped = function(){\n"
- "var e = this.exists();\n"
- "return ( e && e.options && e.options.capped ) ? true : false;\n"
- "}\n"
- "DBCollection.prototype.distinct = function( keyString ){\n"
- "var res = this._dbCommand( { distinct : this._shortName , key : keyString } );\n"
- "if ( ! res.ok )\n"
- "throw \"distinct failed: \" + tojson( res );\n"
- "return res.values;\n"
- "}\n"
- "DBCollection.prototype.group = function( params ){\n"
- "params.ns = this._shortName;\n"
- "return this._db.group( params );\n"
- "}\n"
- "DBCollection.prototype.groupcmd = function( params ){\n"
- "params.ns = this._shortName;\n"
- "return this._db.groupcmd( params );\n"
- "}\n"
- "DBCollection.prototype.toString = function(){\n"
- "return this.getFullName();\n"
- "}\n"
- "DBCollection.prototype.shellPrint = DBCollection.prototype.toString;\n"
+ "for ( var k in x ) total++;\n"
+ "if ( total == 0 ) {\n"
+ "s += indent + lineEnding;}\n"
+ "var keys = x;\n"
+ "if ( typeof( x._simpleKeys ) == \"function\" )\n"
+ "keys = x._simpleKeys();\n"
+ "var num = 1;\n"
+ "for ( var k in keys ){\n"
+ "var val = x[k];\n"
+ "if ( val == DB.prototype || val == DBCollection.prototype )\n"
+ "continue;\n"
+ "s += indent + \"\\\"\" + k + \"\\\" : \" + tojson( val, indent , nolint );\n"
+ "if (num != total) {\n"
+ "s += \",\";\n"
+ "num++;}\n"
+ "s += lineEnding;}\n"
+ "indent = indent.substring(1);\n"
+ "return s + indent + \"}\";}\n"
+ "shellPrint = function( x ){\n"
+ "it = x;\n"
+ "if ( x != undefined )\n"
+ "shellPrintHelper( x );\n"
+ "if ( db ){\n"
+ "var e = db.getPrevError();\n"
+ "if ( e.err ) {\n"
+ "if( e.nPrev <= 1 )\n"
+ "print( \"error on last call: \" + tojson( e.err ) );\n"
+ "else\n"
+ "print( \"an error \" + tojson(e.err) + \" occurred \" + e.nPrev + \" operations back in the command invocation\" );}\n"
+ "db.resetError();}}\n"
+ "printjson = function(x){\n"
+ "print( tojson( x ) );}\n"
+ "shellPrintHelper = function( x ){\n"
+ "if ( typeof( x ) == \"undefined\" ){\n"
+ "if ( typeof( db ) != \"undefined\" && db.getLastError ){\n"
+ "var e = db.getLastError();\n"
+ "if ( e != null )\n"
+ "print( e );}\n"
+ "return;}\n"
+ "if ( x == null ){\n"
+ "print( \"null\" );\n"
+ "return;}\n"
+ "if ( typeof x != \"object\" )\n"
+ "return print( x );\n"
+ "var p = x.shellPrint;\n"
+ "if ( typeof p == \"function\" )\n"
+ "return x.shellPrint();\n"
+ "var p = x.tojson;\n"
+ "if ( typeof p == \"function\" )\n"
+ "print( x.tojson() );\n"
+ "else\n"
+ "print( tojson( x ) );}\n"
+ "shellHelper = function( command , rest , shouldPrint ){\n"
+ "command = command.trim();\n"
+ "var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
+ "if ( ! shellHelper[command] )\n"
+ "throw \"no command [\" + command + \"]\";\n"
+ "var res = shellHelper[command].apply( null , args );\n"
+ "if ( shouldPrint ){\n"
+ "shellPrintHelper( res );}\n"
+ "return res;}\n"
+ "help = shellHelper.help = function(){\n"
+ "print( \"HELP\" );\n"
+ "print( \"\\t\" + \"show dbs show database names\");\n"
+ "print( \"\\t\" + \"show collections show collections in current database\");\n"
+ "print( \"\\t\" + \"show users show users in current database\");\n"
+ "print( \"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
+ "print( \"\\t\" + \"use <db name> set curent database to <db name>\" );\n"
+ "print( \"\\t\" + \"db.help() help on DB methods\");\n"
+ "print( \"\\t\" + \"db.foo.help() help on collection methods\");\n"
+ "print( \"\\t\" + \"db.foo.find() list objects in collection foo\" );\n"
+ "print( \"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\" );\n"
+ "print( \"\\t\" + \"it result of the last line evaluated; use to further iterate\");}\n"
+ "shellHelper.use = function( dbname ){\n"
+ "db = db.getMongo().getDB( dbname );\n"
+ "print( \"switched to db \" + db.getName() );}\n"
+ "shellHelper.it = function(){\n"
+ "if ( typeof( ___it___ ) == \"undefined\" || ___it___ == null ){\n"
+ "print( \"no cursor\" );\n"
+ "return;}\n"
+ "shellPrintHelper( ___it___ );}\n"
+ "shellHelper.show = function( what ){\n"
+ "assert( typeof what == \"string\" );\n"
+ "if( what == \"profile\" ) {\n"
+ "if( db.system.profile.count() == 0 ) {\n"
+ "print(\"db.system.profile is empty\");\n"
+ "print(\"Use db.setProfilingLevel(2) will enable profiling\");\n"
+ "print(\"Use db.system.profile.find() to show raw profile entries\");}\n"
+ "else {\n"
+ "print();\n"
+ "db.system.profile.find({ millis : { $gt : 0 } }).sort({$natural:-1}).limit(5).forEach( function(x){print(\"\"+x.millis+\"ms \" + String(x.ts).substring(0,24)); print(x.info); print(\"\\n\");} )}\n"
+ "return \"\";}\n"
+ "if ( what == \"users\" ){\n"
+ "db.system.users.find().forEach( printjson );\n"
+ "return \"\";}\n"
+ "if ( what == \"collections\" || what == \"tables\" ) {\n"
+ "db.getCollectionNames().forEach( function(x){print(x)} );\n"
+ "return \"\";}\n"
+ "if ( what == \"dbs\" ) {\n"
+ "db.getMongo().getDBNames().sort().forEach( function(x){print(x)} );\n"
+ "return \"\";}\n"
+ "throw \"don't know how to show [\" + what + \"]\";}\n"
+ "if ( typeof( Map ) == \"undefined\" ){\n"
+ "Map = function(){\n"
+ "this._data = {};}}\n"
+ "Map.hash = function( val ){\n"
+ "if ( ! val )\n"
+ "return val;\n"
+ "switch ( typeof( val ) ){\n"
+ "case 'string':\n"
+ "case 'number':\n"
+ "case 'date':\n"
+ "return val.toString();\n"
+ "case 'object':\n"
+ "case 'array':\n"
+ "var s = \"\";\n"
+ "for ( var k in val ){\n"
+ "s += k + val[k];}\n"
+ "return s;}\n"
+ "throw \"can't hash : \" + typeof( val );}\n"
+ "Map.prototype.put = function( key , value ){\n"
+ "var o = this._get( key );\n"
+ "var old = o.value;\n"
+ "o.value = value;\n"
+ "return old;}\n"
+ "Map.prototype.get = function( key ){\n"
+ "return this._get( key ).value;}\n"
+ "Map.prototype._get = function( key ){\n"
+ "var h = Map.hash( key );\n"
+ "var a = this._data[h];\n"
+ "if ( ! a ){\n"
+ "a = [];\n"
+ "this._data[h] = a;}\n"
+ "for ( var i=0; i<a.length; i++ ){\n"
+ "if ( friendlyEqual( key , a[i].key ) ){\n"
+ "return a[i];}}\n"
+ "var o = { key : key , value : null };\n"
+ "a.push( o );\n"
+ "return o;}\n"
+ "Map.prototype.values = function(){\n"
+ "var all = [];\n"
+ "for ( var k in this._data ){\n"
+ "this._data[k].forEach( function(z){ all.push( z.value ); } );}\n"
+ "return all;}\n"
+ "if ( typeof( gc ) == \"undefined\" ){\n"
+ "gc = function(){}}\n"
+ "Math.sigFig = function( x , N ){\n"
+ "if ( ! N ){\n"
+ "N = 3;}\n"
+ "var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );\n"
+ "return Math.round(x*p)/p;}\n"
+ "Random = function() {}\n"
+ "Random.srand = function( s ) { _srand( s ); }\n"
+ "Random.rand = function() { return _rand(); }\n"
+ "Random.randInt = function( n ) { return Math.floor( Random.rand() * n ); }\n"
+ "Random.setRandomSeed = function( s ) {\n"
+ "s = s || new Date().getTime();\n"
+ "print( \"setting random seed: \" + s );\n"
+ "Random.srand( s );}\n"
+ "Random.genExp = function( mean ) {\n"
+ "return -Math.log( Random.rand() ) * mean;}\n"
+ "killWithUris = function( uris ) {\n"
+ "var inprog = db.currentOp().inprog;\n"
+ "for( var u in uris ) {\n"
+ "for ( var i in inprog ) {\n"
+ "if ( uris[ u ] == inprog[ i ].client ) {\n"
+ "db.killOp( inprog[ i ].opid );}}}}\n"
"if ( typeof DB == \"undefined\" ){\n"
"DB = function( mongo , name ){\n"
"this._mongo = mongo;\n"
- "this._name = name;\n"
- "}\n"
- "}\n"
+ "this._name = name;}}\n"
"DB.prototype.getMongo = function(){\n"
"assert( this._mongo , \"why no mongo!\" );\n"
- "return this._mongo;\n"
- "}\n"
+ "return this._mongo;}\n"
"DB.prototype.getSisterDB = function( name ){\n"
- "return this.getMongo().getDB( name );\n"
- "}\n"
+ "return this.getMongo().getDB( name );}\n"
"DB.prototype.getName = function(){\n"
- "return this._name;\n"
- "}\n"
+ "return this._name;}\n"
+ "DB.prototype.stats = function(){\n"
+ "return this.runCommand( { dbstats : 1 } );}\n"
"DB.prototype.getCollection = function( name ){\n"
- "return new DBCollection( this._mongo , this , name , this._name + \".\" + name );\n"
- "}\n"
+ "return new DBCollection( this._mongo , this , name , this._name + \".\" + name );}\n"
"DB.prototype.commandHelp = function( name ){\n"
"var c = {};\n"
"c[name] = 1;\n"
"c.help = true;\n"
- "return this.runCommand( c ).help;\n"
- "}\n"
+ "return this.runCommand( c ).help;}\n"
"DB.prototype.runCommand = function( obj ){\n"
"if ( typeof( obj ) == \"string\" ){\n"
"var n = {};\n"
"n[obj] = 1;\n"
- "obj = n;\n"
- "}\n"
- "return this.getCollection( \"$cmd\" ).findOne( obj );\n"
- "}\n"
+ "obj = n;}\n"
+ "return this.getCollection( \"$cmd\" ).findOne( obj );}\n"
"DB.prototype._dbCommand = DB.prototype.runCommand;\n"
"DB.prototype._adminCommand = function( obj ){\n"
"if ( this._name == \"admin\" )\n"
"return this.runCommand( obj );\n"
- "return this.getSisterDB( \"admin\" ).runCommand( obj );\n"
- "}\n"
- "DB.prototype.addUser = function( username , pass ){\n"
+ "return this.getSisterDB( \"admin\" ).runCommand( obj );}\n"
+ "DB.prototype.addUser = function( username , pass, readOnly ){\n"
+ "readOnly = readOnly || false;\n"
"var c = this.getCollection( \"system.users\" );\n"
"var u = c.findOne( { user : username } ) || { user : username };\n"
+ "u.readOnly = readOnly;\n"
"u.pwd = hex_md5( username + \":mongo:\" + pass );\n"
"print( tojson( u ) );\n"
- "c.save( u );\n"
- "}\n"
+ "c.save( u );}\n"
"DB.prototype.removeUser = function( username ){\n"
- "this.getCollection( \"system.users\" ).remove( { user : username } );\n"
- "}\n"
+ "this.getCollection( \"system.users\" ).remove( { user : username } );}\n"
+ "DB.prototype.__pwHash = function( nonce, username, pass ) {\n"
+ "return hex_md5( nonce + username + hex_md5( username + \":mongo:\" + pass ) );}\n"
"DB.prototype.auth = function( username , pass ){\n"
"var n = this.runCommand( { getnonce : 1 } );\n"
"var a = this.runCommand(\n"
@@ -448,277 +618,127 @@ const char * jsconcatcode =
"authenticate : 1 ,\n"
"user : username ,\n"
"nonce : n.nonce ,\n"
- "key : hex_md5( n.nonce + username + hex_md5( username + \":mongo:\" + pass ) )\n"
- "}\n"
+ "key : this.__pwHash( n.nonce, username, pass )}\n"
");\n"
- "return a.ok;\n"
- "}\n"
- "/**\n"
- "Create a new collection in the database. Normally, collection creation is automatic. You would\n"
- "use this function if you wish to specify special options on creation.\n"
- "If the collection already exists, no action occurs.\n"
- "<p>Options:</p>\n"
- "<ul>\n"
- "<li>\n"
- "size: desired initial extent size for the collection. Must be <= 1000000000.\n"
- "for fixed size (capped) collections, this size is the total/max size of the\n"
- "collection.\n"
- "</li>\n"
- "<li>\n"
- "capped: if true, this is a capped collection (where old data rolls out).\n"
- "</li>\n"
- "<li> max: maximum number of objects if capped (optional).</li>\n"
- "</ul>\n"
- "<p>Example: </p>\n"
- "<code>db.createCollection(\"movies\", { size: 10 * 1024 * 1024, capped:true } );</code>\n"
- "* @param {String} name Name of new collection to create\n"
- "* @param {Object} options Object with options for call. Options are listed above.\n"
- "* @return SOMETHING_FIXME\n"
- "*/\n"
+ "return a.ok;}\n"
+ "\n"
"DB.prototype.createCollection = function(name, opt) {\n"
"var options = opt || {};\n"
"var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };\n"
"var res = this._dbCommand(cmd);\n"
- "return res;\n"
- "}\n"
- "/**\n"
- "* Returns the current profiling level of this database\n"
- "* @return SOMETHING_FIXME or null on error\n"
- "*/\n"
+ "return res;}\n"
+ "\n"
"DB.prototype.getProfilingLevel = function() {\n"
"var res = this._dbCommand( { profile: -1 } );\n"
- "return res ? res.was : null;\n"
- "}\n"
- "/**\n"
- "Erase the entire database. (!)\n"
- "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
- "*/\n"
+ "return res ? res.was : null;}\n"
+ "\n"
"DB.prototype.dropDatabase = function() {\n"
"if ( arguments.length )\n"
"throw \"dropDatabase doesn't take arguments\";\n"
- "return this._dbCommand( { dropDatabase: 1 } );\n"
- "}\n"
+ "return this._dbCommand( { dropDatabase: 1 } );}\n"
"DB.prototype.shutdownServer = function() {\n"
"if( \"admin\" != this._name ){\n"
- "return \"shutdown command only works with the admin database; try 'use admin'\";\n"
- "}\n"
+ "return \"shutdown command only works with the admin database; try 'use admin'\";}\n"
"try {\n"
- "this._dbCommand(\"shutdown\");\n"
- "throw \"shutdownServer failed\";\n"
- "}\n"
+ "var res = this._dbCommand(\"shutdown\");\n"
+ "if( res )\n"
+ "throw \"shutdownServer failed: \" + res.errmsg;\n"
+ "throw \"shutdownServer failed\";}\n"
"catch ( e ){\n"
"assert( tojson( e ).indexOf( \"error doing query: failed\" ) >= 0 , \"unexpected error: \" + tojson( e ) );\n"
- "print( \"server should be down...\" );\n"
- "}\n"
- "}\n"
- "/**\n"
- "Clone database on another server to here.\n"
- "<p>\n"
- "Generally, you should dropDatabase() first as otherwise the cloned information will MERGE\n"
- "into whatever data is already present in this database. (That is however a valid way to use\n"
- "clone if you are trying to do something intentionally, such as union three non-overlapping\n"
- "databases into one.)\n"
- "<p>\n"
- "This is a low level administrative function will is not typically used.\n"
- "* @param {String} from Where to clone from (dbhostname[:port]). May not be this database\n"
- "(self) as you cannot clone to yourself.\n"
- "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
- "* See also: db.copyDatabase()\n"
- "*/\n"
+ "print( \"server should be down...\" );}}\n"
+ "\n"
"DB.prototype.cloneDatabase = function(from) {\n"
"assert( isString(from) && from.length );\n"
- "return this._dbCommand( { clone: from } );\n"
- "}\n"
- "/**\n"
- "Clone collection on another server to here.\n"
- "<p>\n"
- "Generally, you should drop() first as otherwise the cloned information will MERGE\n"
- "into whatever data is already present in this collection. (That is however a valid way to use\n"
- "clone if you are trying to do something intentionally, such as union three non-overlapping\n"
- "collections into one.)\n"
- "<p>\n"
- "This is a low level administrative function is not typically used.\n"
- "* @param {String} from mongod instance from which to clnoe (dbhostname:port). May\n"
- "not be this mongod instance, as clone from self is not allowed.\n"
- "* @param {String} collection name of collection to clone.\n"
- "* @param {Object} query query specifying which elements of collection are to be cloned.\n"
- "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
- "* See also: db.cloneDatabase()\n"
- "*/\n"
+ "return this._dbCommand( { clone: from } );}\n"
+ "\n"
"DB.prototype.cloneCollection = function(from, collection, query) {\n"
"assert( isString(from) && from.length );\n"
"assert( isString(collection) && collection.length );\n"
"collection = this._name + \".\" + collection;\n"
"query = query || {};\n"
- "return this._dbCommand( { cloneCollection:collection, from:from, query:query } );\n"
- "}\n"
- "/**\n"
- "Copy database from one server or name to another server or name.\n"
- "Generally, you should dropDatabase() first as otherwise the copied information will MERGE\n"
- "into whatever data is already present in this database (and you will get duplicate objects\n"
- "in collections potentially.)\n"
- "For security reasons this function only works when executed on the \"admin\" db. However,\n"
- "if you have access to said db, you can copy any database from one place to another.\n"
- "This method provides a way to \"rename\" a database by copying it to a new db name and\n"
- "location. Additionally, it effectively provides a repair facility.\n"
- "* @param {String} fromdb database name from which to copy.\n"
- "* @param {String} todb database name to copy to.\n"
- "* @param {String} fromhost hostname of the database (and optionally, \":port\") from which to\n"
- "copy the data. default if unspecified is to copy from self.\n"
- "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
- "* See also: db.clone()\n"
- "*/\n"
- "DB.prototype.copyDatabase = function(fromdb, todb, fromhost) {\n"
+ "return this._dbCommand( { cloneCollection:collection, from:from, query:query } );}\n"
+ "\n"
+ "DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {\n"
"assert( isString(fromdb) && fromdb.length );\n"
"assert( isString(todb) && todb.length );\n"
"fromhost = fromhost || \"\";\n"
- "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );\n"
- "}\n"
- "/**\n"
- "Repair database.\n"
- "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
- "*/\n"
+ "if ( username && password ) {\n"
+ "var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );\n"
+ "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );\n"
+ "} else {\n"
+ "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );}}\n"
+ "\n"
"DB.prototype.repairDatabase = function() {\n"
- "return this._dbCommand( { repairDatabase: 1 } );\n"
- "}\n"
+ "return this._dbCommand( { repairDatabase: 1 } );}\n"
"DB.prototype.help = function() {\n"
"print(\"DB methods:\");\n"
+ "print(\"\\tdb.addUser(username, password[, readOnly=false])\");\n"
"print(\"\\tdb.auth(username, password)\");\n"
- "print(\"\\tdb.getMongo() get the server connection object\");\n"
- "print(\"\\tdb.getMongo().setSlaveOk() allow this connection to read from the nonmaster member of a replica pair\");\n"
- "print(\"\\tdb.getSisterDB(name) get the db at the same server as this onew\");\n"
- "print(\"\\tdb.getName()\");\n"
- "print(\"\\tdb.getCollection(cname) same as db['cname'] or db.cname\");\n"
- "print(\"\\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }\");\n"
- "print(\"\\tdb.commandHelp(name) returns the help for the command\");\n"
- "print(\"\\tdb.addUser(username, password)\");\n"
- "print(\"\\tdb.removeUser(username)\");\n"
- "print(\"\\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )\");\n"
- "print(\"\\tdb.getReplicationInfo()\");\n"
- "print(\"\\tdb.printReplicationInfo()\");\n"
- "print(\"\\tdb.printSlaveReplicationInfo()\");\n"
- "print(\"\\tdb.getProfilingLevel()\");\n"
- "print(\"\\tdb.setProfilingLevel(level) 0=off 1=slow 2=all\");\n"
"print(\"\\tdb.cloneDatabase(fromhost)\");\n"
+ "print(\"\\tdb.commandHelp(name) returns the help for the command\");\n"
"print(\"\\tdb.copyDatabase(fromdb, todb, fromhost)\");\n"
- "print(\"\\tdb.shutdownServer()\");\n"
+ "print(\"\\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )\");\n"
+ "print(\"\\tdb.currentOp() displays the current operation in the db\" );\n"
"print(\"\\tdb.dropDatabase()\");\n"
- "print(\"\\tdb.repairDatabase()\");\n"
"print(\"\\tdb.eval(func, args) run code server-side\");\n"
+ "print(\"\\tdb.getCollection(cname) same as db['cname'] or db.cname\");\n"
+ "print(\"\\tdb.getCollectionNames()\");\n"
"print(\"\\tdb.getLastError() - just returns the err msg string\");\n"
"print(\"\\tdb.getLastErrorObj() - return full status object\");\n"
+ "print(\"\\tdb.getMongo() get the server connection object\");\n"
+ "print(\"\\tdb.getMongo().setSlaveOk() allow this connection to read from the nonmaster member of a replica pair\");\n"
+ "print(\"\\tdb.getName()\");\n"
"print(\"\\tdb.getPrevError()\");\n"
- "print(\"\\tdb.resetError()\");\n"
- "print(\"\\tdb.getCollectionNames()\");\n"
- "print(\"\\tdb.currentOp() displays the current operation in the db\" );\n"
- "print(\"\\tdb.killOp() kills the current operation in the db\" );\n"
+ "print(\"\\tdb.getProfilingLevel()\");\n"
+ "print(\"\\tdb.getReplicationInfo()\");\n"
+ "print(\"\\tdb.getSisterDB(name) get the db at the same server as this onew\");\n"
+ "print(\"\\tdb.killOp(opid) kills the current operation in the db\" );\n"
"print(\"\\tdb.printCollectionStats()\" );\n"
- "print(\"\\tdb.version() current version of the server\" );\n"
- "}\n"
+ "print(\"\\tdb.printReplicationInfo()\");\n"
+ "print(\"\\tdb.printSlaveReplicationInfo()\");\n"
+ "print(\"\\tdb.printShardingStatus()\");\n"
+ "print(\"\\tdb.removeUser(username)\");\n"
+ "print(\"\\tdb.repairDatabase()\");\n"
+ "print(\"\\tdb.resetError()\");\n"
+ "print(\"\\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }\");\n"
+ "print(\"\\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all\");\n"
+ "print(\"\\tdb.shutdownServer()\");\n"
+ "print(\"\\tdb.stats()\");\n"
+ "print(\"\\tdb.version() current version of the server\" );}\n"
"DB.prototype.printCollectionStats = function(){\n"
+ "var mydb = this;\n"
"this.getCollectionNames().forEach(\n"
"function(z){\n"
"print( z );\n"
- "printjson( db[z].stats() );\n"
- "print( \"---\" );\n"
- "}\n"
- ");\n"
- "}\n"
- "/**\n"
- "* <p> Set profiling level for your db. Profiling gathers stats on query performance. </p>\n"
- "*\n"
- "* <p>Default is off, and resets to off on a database restart -- so if you want it on,\n"
- "* turn it on periodically. </p>\n"
- "*\n"
- "* <p>Levels :</p>\n"
- "* <ul>\n"
- "* <li>0=off</li>\n"
- "* <li>1=log very slow (>100ms) operations</li>\n"
- "* <li>2=log all</li>\n"
- "* @param {String} level Desired level of profiling\n"
- "* @return SOMETHING_FIXME or null on error\n"
- "*/\n"
- "DB.prototype.setProfilingLevel = function(level) {\n"
+ "printjson( mydb.getCollection(z).stats() );\n"
+ "print( \"---\" );}\n"
+ ");}\n"
+ "\n"
+ "DB.prototype.setProfilingLevel = function(level,slowms) {\n"
"if (level < 0 || level > 2) {\n"
- "throw { dbSetProfilingException : \"input level \" + level + \" is out of range [0..2]\" };\n"
- "}\n"
- "if (level) {\n"
- "this.createCollection(\"system.profile\", { capped: true, size: 128 * 1024 } );\n"
- "}\n"
- "return this._dbCommand( { profile: level } );\n"
- "}\n"
- "/**\n"
- "* <p> Evaluate a js expression at the database server.</p>\n"
- "*\n"
- "* <p>Useful if you need to touch a lot of data lightly; in such a scenario\n"
- "* the network transfer of the data could be a bottleneck. A good example\n"
- "* is \"select count(*)\" -- can be done server side via this mechanism.\n"
- "* </p>\n"
- "*\n"
- "* <p>\n"
- "* If the eval fails, an exception is thrown of the form:\n"
- "* </p>\n"
- "* <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg: str] } }</code>\n"
- "*\n"
- "* <p>Example: </p>\n"
- "* <code>print( \"mycount: \" + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();} );</code>\n"
- "*\n"
- "* @param {Function} jsfunction Javascript function to run on server. Note this it not a closure, but rather just \"code\".\n"
- "* @return result of your function, or null if error\n"
- "*\n"
- "*/\n"
+ "throw { dbSetProfilingException : \"input level \" + level + \" is out of range [0..2]\" };}\n"
+ "var cmd = { profile: level };\n"
+ "if ( slowms )\n"
+ "cmd[\"slowms\"] = slowms;\n"
+ "return this._dbCommand( cmd );}\n"
+ "\n"
"DB.prototype.eval = function(jsfunction) {\n"
"var cmd = { $eval : jsfunction };\n"
"if ( arguments.length > 1 ) {\n"
- "cmd.args = argumentsToArray( arguments ).slice(1);\n"
- "}\n"
+ "cmd.args = argumentsToArray( arguments ).slice(1);}\n"
"var res = this._dbCommand( cmd );\n"
"if (!res.ok)\n"
"throw tojson( res );\n"
- "return res.retval;\n"
- "}\n"
+ "return res.retval;}\n"
"DB.prototype.dbEval = DB.prototype.eval;\n"
- "/**\n"
- "*\n"
- "* <p>\n"
- "* Similar to SQL group by. For example: </p>\n"
- "*\n"
- "* <code>select a,b,sum(c) csum from coll where active=1 group by a,b</code>\n"
- "*\n"
- "* <p>\n"
- "* corresponds to the following in 10gen:\n"
- "* </p>\n"
- "*\n"
- "* <code>\n"
- "db.group(\n"
- "{\n"
- "ns: \"coll\",\n"
- "key: { a:true, b:true },\n"
- "cond: { active:1 },\n"
- "reduce: function(obj,prev) { prev.csum += obj.c; } ,\n"
- "initial: { csum: 0 }\n"
- "});\n"
- "</code>\n"
- "*\n"
- "*\n"
- "* <p>\n"
- "* An array of grouped items is returned. The array must fit in RAM, thus this function is not\n"
- "* suitable when the return set is extremely large.\n"
- "* </p>\n"
- "* <p>\n"
- "* To order the grouped data, simply sort it client side upon return.\n"
- "* <p>\n"
- "Defaults\n"
- "cond may be null if you want to run against all rows in the collection\n"
- "keyf is a function which takes an object and returns the desired key. set either key or keyf (not both).\n"
- "* </p>\n"
- "*/\n"
+ "\n"
"DB.prototype.groupeval = function(parmsObj) {\n"
"var groupFunction = function() {\n"
"var parms = args[0];\n"
"var c = db[parms.ns].find(parms.cond||{});\n"
"var map = new Map();\n"
- "var pks = parms.key ? parms.key.keySet() : null;\n"
+ "var pks = parms.key ? Object.keySet( parms.key ) : null;\n"
"var pkl = pks ? pks.length : 0;\n"
"var key = {};\n"
"while( c.hasNext() ) {\n"
@@ -726,114 +746,82 @@ const char * jsconcatcode =
"if ( pks ) {\n"
"for( var i=0; i<pkl; i++ ){\n"
"var k = pks[i];\n"
- "key[k] = obj[k];\n"
- "}\n"
- "}\n"
+ "key[k] = obj[k];}}\n"
"else {\n"
- "key = parms.$keyf(obj);\n"
- "}\n"
+ "key = parms.$keyf(obj);}\n"
"var aggObj = map.get(key);\n"
"if( aggObj == null ) {\n"
"var newObj = Object.extend({}, key);\n"
"aggObj = Object.extend(newObj, parms.initial)\n"
- "map.put( key , aggObj );\n"
- "}\n"
- "parms.$reduce(obj, aggObj);\n"
- "}\n"
- "return map.values();\n"
- "}\n"
- "return this.eval(groupFunction, this._groupFixParms( parmsObj ));\n"
- "}\n"
+ "map.put( key , aggObj );}\n"
+ "parms.$reduce(obj, aggObj);}\n"
+ "return map.values();}\n"
+ "return this.eval(groupFunction, this._groupFixParms( parmsObj ));}\n"
"DB.prototype.groupcmd = function( parmsObj ){\n"
"var ret = this.runCommand( { \"group\" : this._groupFixParms( parmsObj ) } );\n"
"if ( ! ret.ok ){\n"
- "throw \"group command failed: \" + tojson( ret );\n"
- "}\n"
- "return ret.retval;\n"
- "}\n"
+ "throw \"group command failed: \" + tojson( ret );}\n"
+ "return ret.retval;}\n"
"DB.prototype.group = DB.prototype.groupcmd;\n"
"DB.prototype._groupFixParms = function( parmsObj ){\n"
"var parms = Object.extend({}, parmsObj);\n"
"if( parms.reduce ) {\n"
"parms.$reduce = parms.reduce;\n"
- "delete parms.reduce;\n"
- "}\n"
+ "delete parms.reduce;}\n"
"if( parms.keyf ) {\n"
"parms.$keyf = parms.keyf;\n"
- "delete parms.keyf;\n"
- "}\n"
- "return parms;\n"
- "}\n"
+ "delete parms.keyf;}\n"
+ "return parms;}\n"
"DB.prototype.resetError = function(){\n"
- "return this.runCommand( { reseterror : 1 } );\n"
- "}\n"
+ "return this.runCommand( { reseterror : 1 } );}\n"
"DB.prototype.forceError = function(){\n"
- "return this.runCommand( { forceerror : 1 } );\n"
- "}\n"
+ "return this.runCommand( { forceerror : 1 } );}\n"
"DB.prototype.getLastError = function(){\n"
"var res = this.runCommand( { getlasterror : 1 } );\n"
"if ( ! res.ok )\n"
"throw \"getlasterror failed: \" + tojson( res );\n"
- "return res.err;\n"
- "}\n"
+ "return res.err;}\n"
"DB.prototype.getLastErrorObj = function(){\n"
"var res = this.runCommand( { getlasterror : 1 } );\n"
"if ( ! res.ok )\n"
"throw \"getlasterror failed: \" + tojson( res );\n"
- "return res;\n"
- "}\n"
+ "return res;}\n"
+ "DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;\n"
"/* Return the last error which has occurred, even if not the very last error.\n"
"Returns:\n"
"{ err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }\n"
"result.err will be null if no error has occurred.\n"
"*/\n"
"DB.prototype.getPrevError = function(){\n"
- "return this.runCommand( { getpreverror : 1 } );\n"
- "}\n"
+ "return this.runCommand( { getpreverror : 1 } );}\n"
"DB.prototype.getCollectionNames = function(){\n"
"var all = [];\n"
"var nsLength = this._name.length + 1;\n"
"this.getCollection( \"system.namespaces\" ).find().sort({name:1}).forEach(\n"
"function(z){\n"
"var name = z.name;\n"
- "if ( name.indexOf( \"$\" ) >= 0 )\n"
+ "if ( name.indexOf( \"$\" ) >= 0 && name != \"local.oplog.$main\" )\n"
"return;\n"
- "all.push( name.substring( nsLength ) );\n"
- "}\n"
+ "all.push( name.substring( nsLength ) );}\n"
");\n"
- "return all;\n"
- "}\n"
+ "return all;}\n"
"DB.prototype.tojson = function(){\n"
- "return this._name;\n"
- "}\n"
+ "return this._name;}\n"
"DB.prototype.toString = function(){\n"
- "return this._name;\n"
- "}\n"
+ "return this._name;}\n"
"DB.prototype.currentOp = function(){\n"
- "return db.$cmd.sys.inprog.findOne();\n"
- "}\n"
+ "return db.$cmd.sys.inprog.findOne();}\n"
"DB.prototype.currentOP = DB.prototype.currentOp;\n"
- "DB.prototype.killOp = function(){\n"
- "return db.$cmd.sys.killop.findOne();\n"
- "}\n"
+ "DB.prototype.killOp = function(op) {\n"
+ "if( !op )\n"
+ "throw \"no opNum to kill specified\";\n"
+ "return db.$cmd.sys.killop.findOne({'op':op});}\n"
"DB.prototype.killOP = DB.prototype.killOp;\n"
- "/**\n"
- "Get a replication log information summary.\n"
- "<p>\n"
- "This command is for the database/cloud administer and not applicable to most databases.\n"
- "It is only used with the local database. One might invoke from the JS shell:\n"
- "<pre>\n"
- "use local\n"
- "db.getReplicationInfo();\n"
- "</pre>\n"
- "It is assumed that this database is a replication master -- the information returned is\n"
- "about the operation log stored at local.oplog.$main on the replication master. (It also\n"
- "works on a machine in a replica pair: for replica pairs, both machines are \"masters\" from\n"
- "an internal database perspective.\n"
- "<p>\n"
- "* @return Object timeSpan: time span of the oplog from start to end if slave is more out\n"
- "* of date than that, it can't recover without a complete resync\n"
- "*/\n"
+ "DB.tsToSeconds = function(x){\n"
+ "if ( x.t && x.i )\n"
+ "return x.t / 1000;\n"
+ "return x / 4294967296;}\n"
+ "\n"
"DB.prototype.getReplicationInfo = function() {\n"
"var db = this.getSisterDB(\"local\");\n"
"var result = { };\n"
@@ -842,81 +830,66 @@ const char * jsconcatcode =
"result.logSizeMB = ol.options.size / 1000 / 1000;\n"
"} else {\n"
"result.errmsg = \"local.oplog.$main, or its options, not found in system.namespaces collection (not --master?)\";\n"
- "return result;\n"
- "}\n"
+ "return result;}\n"
"var firstc = db.oplog.$main.find().sort({$natural:1}).limit(1);\n"
"var lastc = db.oplog.$main.find().sort({$natural:-1}).limit(1);\n"
"if( !firstc.hasNext() || !lastc.hasNext() ) {\n"
"result.errmsg = \"objects not found in local.oplog.$main -- is this a new and empty db instance?\";\n"
"result.oplogMainRowCount = db.oplog.$main.count();\n"
- "return result;\n"
- "}\n"
+ "return result;}\n"
"var first = firstc.next();\n"
"var last = lastc.next();\n"
"{\n"
"var tfirst = first.ts;\n"
"var tlast = last.ts;\n"
"if( tfirst && tlast ) {\n"
- "tfirst = tfirst / 4294967296;\n"
- "tlast = tlast / 4294967296;\n"
+ "tfirst = DB.tsToSeconds( tfirst );\n"
+ "tlast = DB.tsToSeconds( tlast );\n"
"result.timeDiff = tlast - tfirst;\n"
"result.timeDiffHours = Math.round(result.timeDiff / 36)/100;\n"
"result.tFirst = (new Date(tfirst*1000)).toString();\n"
"result.tLast = (new Date(tlast*1000)).toString();\n"
- "result.now = Date();\n"
- "}\n"
+ "result.now = Date();}\n"
"else {\n"
- "result.errmsg = \"ts element not found in oplog objects\";\n"
- "}\n"
- "}\n"
- "return result;\n"
- "}\n"
+ "result.errmsg = \"ts element not found in oplog objects\";}}\n"
+ "return result;}\n"
"DB.prototype.printReplicationInfo = function() {\n"
"var result = this.getReplicationInfo();\n"
"if( result.errmsg ) {\n"
"print(tojson(result));\n"
- "return;\n"
- "}\n"
+ "return;}\n"
"print(\"configured oplog size: \" + result.logSizeMB + \"MB\");\n"
"print(\"log length start to end: \" + result.timeDiff + \"secs (\" + result.timeDiffHours + \"hrs)\");\n"
"print(\"oplog first event time: \" + result.tFirst);\n"
"print(\"oplog last event time: \" + result.tLast);\n"
- "print(\"now: \" + result.now);\n"
- "}\n"
+ "print(\"now: \" + result.now);}\n"
"DB.prototype.printSlaveReplicationInfo = function() {\n"
"function g(x) {\n"
"print(\"source: \" + x.host);\n"
- "var st = new Date(x.syncedTo/4294967296*1000);\n"
+ "var st = new Date( DB.tsToSeconds( x.syncedTo ) * 1000 );\n"
"var now = new Date();\n"
"print(\"syncedTo: \" + st.toString() );\n"
"var ago = (now-st)/1000;\n"
"var hrs = Math.round(ago/36)/100;\n"
- "print(\" = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");\n"
- "}\n"
+ "print(\" = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");}\n"
"var L = this.getSisterDB(\"local\");\n"
"if( L.sources.count() == 0 ) {\n"
"print(\"local.sources is empty; is this db a --slave?\");\n"
- "return;\n"
- "}\n"
- "L.sources.find().forEach(g);\n"
- "}\n"
+ "return;}\n"
+ "L.sources.find().forEach(g);}\n"
"DB.prototype.serverBuildInfo = function(){\n"
- "return this._adminCommand( \"buildinfo\" );\n"
- "}\n"
+ "return this._adminCommand( \"buildinfo\" );}\n"
"DB.prototype.serverStatus = function(){\n"
- "return this._adminCommand( \"serverStatus\" );\n"
- "}\n"
+ "return this._adminCommand( \"serverStatus\" );}\n"
"DB.prototype.version = function(){\n"
- "return this.serverBuildInfo().version;\n"
- "}\n"
+ "return this.serverBuildInfo().version;}\n"
+ "DB.prototype.printShardingStatus = function(){\n"
+ "printShardingStatus( this.getSisterDB( \"config\" ) );}\n"
"if ( typeof Mongo == \"undefined\" ){\n"
"Mongo = function( host ){\n"
- "this.init( host );\n"
- "}\n"
- "}\n"
+ "this.init( host );}}\n"
"if ( ! Mongo.prototype ){\n"
- "throw \"Mongo.prototype not defined\";\n"
- "}\n"
+ "throw \"Mongo.prototype not defined\";}\n"
"if ( ! Mongo.prototype.find )\n"
"Mongo.prototype.find = function( ns , query , fields , limit , skip ){ throw \"find not implemented\"; }\n"
"if ( ! Mongo.prototype.insert )\n"
@@ -926,31 +899,31 @@ const char * jsconcatcode =
"if ( ! Mongo.prototype.update )\n"
"Mongo.prototype.update = function( ns , query , obj , upsert ){ throw \"update not implemented;\" }\n"
"if ( typeof mongoInject == \"function\" ){\n"
- "mongoInject( Mongo.prototype );\n"
- "}\n"
+ "mongoInject( Mongo.prototype );}\n"
"Mongo.prototype.setSlaveOk = function() {\n"
- "this.slaveOk = true;\n"
- "}\n"
+ "this.slaveOk = true;}\n"
"Mongo.prototype.getDB = function( name ){\n"
- "return new DB( this , name );\n"
- "}\n"
+ "return new DB( this , name );}\n"
"Mongo.prototype.getDBs = function(){\n"
"var res = this.getDB( \"admin\" ).runCommand( { \"listDatabases\" : 1 } );\n"
- "assert( res.ok == 1 , \"listDatabases failed\" );\n"
- "return res;\n"
- "}\n"
+ "assert( res.ok == 1 , \"listDatabases failed:\" + tojson( res ) );\n"
+ "return res;}\n"
"Mongo.prototype.getDBNames = function(){\n"
"return this.getDBs().databases.map(\n"
"function(z){\n"
- "return z.name;\n"
- "}\n"
- ");\n"
- "}\n"
+ "return z.name;}\n"
+ ");}\n"
+ "Mongo.prototype.getCollection = function(ns){\n"
+ "var idx = ns.indexOf( \".\" );\n"
+ "if ( idx < 0 )\n"
+ "throw \"need . in ns\";\n"
+ "var db = ns.substring( 0 , idx );\n"
+ "var c = ns.substring( idx + 1 );\n"
+ "return this.getDB( db ).getCollection( c );}\n"
"Mongo.prototype.toString = function(){\n"
- "return \"mongo connection to \" + this.host;\n"
- "}\n"
+ "return \"mongo connection to \" + this.host;}\n"
"connect = function( url , user , pass ){\n"
- "print( \"connecting to: \" + url )\n"
+ "chatty( \"connecting to: \" + url )\n"
"if ( user && ! pass )\n"
"throw \"you specified a user and not a password. either you need a password, or you're using the old connect api\";\n"
"var idx = url.indexOf( \"/\" );\n"
@@ -961,33 +934,33 @@ const char * jsconcatcode =
"db = new Mongo( url.substring( 0 , idx ) ).getDB( url.substring( idx + 1 ) );\n"
"if ( user && pass ){\n"
"if ( ! db.auth( user , pass ) ){\n"
- "throw \"couldn't login\";\n"
- "}\n"
- "}\n"
- "return db;\n"
- "}\n"
+ "throw \"couldn't login\";}}\n"
+ "return db;}\n"
"MR = {};\n"
"MR.init = function(){\n"
"$max = 0;\n"
"$arr = [];\n"
"emit = MR.emit;\n"
- "gc();\n"
- "}\n"
+ "$numEmits = 0;\n"
+ "$numReduces = 0;\n"
+ "$numReducesToDB = 0;\n"
+ "gc();}\n"
"MR.cleanup = function(){\n"
"MR.init();\n"
- "gc();\n"
- "}\n"
+ "gc();}\n"
"MR.emit = function(k,v){\n"
- "var num = get_num( k );\n"
+ "$numEmits++;\n"
+ "var num = nativeHelper.apply( get_num_ , [ k ] );\n"
"var data = $arr[num];\n"
"if ( ! data ){\n"
- "data = { key : k , values : [] };\n"
- "$arr[num] = data;\n"
- "}\n"
- "data.values.push( v );\n"
- "$max = Math.max( $max , data.values.length );\n"
- "}\n"
+ "data = { key : k , values : new Array(1000) , count : 0 };\n"
+ "$arr[num] = data;}\n"
+ "data.values[data.count++] = v;\n"
+ "$max = Math.max( $max , data.count );}\n"
"MR.doReduce = function( useDB ){\n"
+ "$numReduces++;\n"
+ "if ( useDB )\n"
+ "$numReducesToDB++;\n"
"$max = 0;\n"
"for ( var i=0; i<$arr.length; i++){\n"
"var data = $arr[i];\n"
@@ -996,44 +969,40 @@ const char * jsconcatcode =
"if ( useDB ){\n"
"var x = tempcoll.findOne( { _id : data.key } );\n"
"if ( x ){\n"
- "data.values.push( x.value );\n"
- "}\n"
- "}\n"
- "var r = $reduce( data.key , data.values );\n"
- "if ( r.length && r[0] ){\n"
+ "data.values[data.count++] = x.value;}}\n"
+ "var r = $reduce( data.key , data.values.slice( 0 , data.count ) );\n"
+ "if ( r && r.length && r[0] ){\n"
"data.values = r;\n"
- "}\n"
+ "data.count = r.length;}\n"
"else{\n"
- "data.values = [ r ];\n"
- "}\n"
- "$max = Math.max( $max , data.values.length );\n"
+ "data.values[0] = r;\n"
+ "data.count = 1;}\n"
+ "$max = Math.max( $max , data.count );\n"
"if ( useDB ){\n"
- "if ( data.values.length == 1 ){\n"
- "tempcoll.save( { _id : data.key , value : data.values[0] } );\n"
- "}\n"
+ "if ( data.count == 1 ){\n"
+ "tempcoll.save( { _id : data.key , value : data.values[0] } );}\n"
"else {\n"
- "tempcoll.save( { _id : data.key , value : data.values } );\n"
- "}\n"
- "}\n"
- "}\n"
- "}\n"
+ "tempcoll.save( { _id : data.key , value : data.values.slice( 0 , data.count ) } );}}}}\n"
"MR.check = function(){\n"
"if ( $max < 2000 && $arr.length < 1000 ){\n"
- "return 0;\n"
- "}\n"
+ "return 0;}\n"
"MR.doReduce();\n"
"if ( $max < 2000 && $arr.length < 1000 ){\n"
- "return 1;\n"
- "}\n"
+ "return 1;}\n"
"MR.doReduce( true );\n"
"$arr = [];\n"
"$max = 0;\n"
"reset_num();\n"
"gc();\n"
- "return 2;\n"
- "}\n"
+ "return 2;}\n"
+ "MR.finalize = function(){\n"
+ "tempcoll.find().forEach(\n"
+ "function(z){\n"
+ "z.value = $finalize( z._id , z.value );\n"
+ "tempcoll.save( z );}\n"
+ ");}\n"
"if ( typeof DBQuery == \"undefined\" ){\n"
- "DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip ){\n"
+ "DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize ){\n"
"this._mongo = mongo;\n"
"this._db = db;\n"
"this._collection = collection;\n"
@@ -1042,65 +1011,60 @@ const char * jsconcatcode =
"this._fields = fields;\n"
"this._limit = limit || 0;\n"
"this._skip = skip || 0;\n"
+ "this._batchSize = batchSize || 0;\n"
"this._cursor = null;\n"
"this._numReturned = 0;\n"
- "this._special = false;\n"
- "}\n"
- "print( \"DBQuery probably won't have array access \" );\n"
- "}\n"
+ "this._special = false;}\n"
+ "print( \"DBQuery probably won't have array access \" );}\n"
"DBQuery.prototype.help = function(){\n"
"print( \"DBQuery help\" );\n"
"print( \"\\t.sort( {...} )\" )\n"
"print( \"\\t.limit( n )\" )\n"
"print( \"\\t.skip( n )\" )\n"
- "print( \"\\t.count()\" )\n"
+ "print( \"\\t.count() - total # of objects matching query, ignores skip,limit\" )\n"
+ "print( \"\\t.size() - total # of objects cursor would return skip,limit effect this\" )\n"
"print( \"\\t.explain()\" )\n"
"print( \"\\t.forEach( func )\" )\n"
- "print( \"\\t.map( func )\" )\n"
- "}\n"
+ "print( \"\\t.map( func )\" )}\n"
"DBQuery.prototype.clone = function(){\n"
"var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,\n"
"this._query , this._fields ,\n"
- "this._limit , this._skip );\n"
+ "this._limit , this._skip , this._batchSize );\n"
"q._special = this._special;\n"
- "return q;\n"
- "}\n"
+ "return q;}\n"
"DBQuery.prototype._ensureSpecial = function(){\n"
"if ( this._special )\n"
"return;\n"
"var n = { query : this._query };\n"
"this._query = n;\n"
- "this._special = true;\n"
- "}\n"
+ "this._special = true;}\n"
"DBQuery.prototype._checkModify = function(){\n"
"if ( this._cursor )\n"
- "throw \"query already executed\";\n"
- "}\n"
+ "throw \"query already executed\";}\n"
"DBQuery.prototype._exec = function(){\n"
"if ( ! this._cursor ){\n"
"assert.eq( 0 , this._numReturned );\n"
- "this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip );\n"
- "this._cursorSeen = 0;\n"
- "}\n"
- "return this._cursor;\n"
- "}\n"
+ "this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize );\n"
+ "this._cursorSeen = 0;}\n"
+ "return this._cursor;}\n"
"DBQuery.prototype.limit = function( limit ){\n"
"this._checkModify();\n"
"this._limit = limit;\n"
- "return this;\n"
- "}\n"
+ "return this;}\n"
+ "DBQuery.prototype.batchSize = function( batchSize ){\n"
+ "this._checkModify();\n"
+ "this._batchSize = batchSize;\n"
+ "return this;}\n"
"DBQuery.prototype.skip = function( skip ){\n"
"this._checkModify();\n"
"this._skip = skip;\n"
- "return this;\n"
- "}\n"
+ "return this;}\n"
"DBQuery.prototype.hasNext = function(){\n"
"this._exec();\n"
"if ( this._limit > 0 && this._cursorSeen >= this._limit )\n"
"return false;\n"
"var o = this._cursor.hasNext();\n"
- "return o;\n"
- "}\n"
+ "return o;}\n"
"DBQuery.prototype.next = function(){\n"
"this._exec();\n"
"var o = this._cursor.hasNext();\n"
@@ -1112,8 +1076,7 @@ const char * jsconcatcode =
"if ( ret.$err && this._numReturned == 0 && ! this.hasNext() )\n"
"throw \"error: \" + tojson( ret );\n"
"this._numReturned++;\n"
- "return ret;\n"
- "}\n"
+ "return ret;}\n"
"DBQuery.prototype.toArray = function(){\n"
"if ( this._arr )\n"
"return this._arr;\n"
@@ -1121,886 +1084,446 @@ const char * jsconcatcode =
"while ( this.hasNext() )\n"
"a.push( this.next() );\n"
"this._arr = a;\n"
- "return a;\n"
- "}\n"
- "DBQuery.prototype.count = function(){\n"
+ "return a;}\n"
+ "DBQuery.prototype.count = function( applySkipLimit ){\n"
"var cmd = { count: this._collection.getName() };\n"
"if ( this._query ){\n"
"if ( this._special )\n"
"cmd.query = this._query.query;\n"
"else\n"
- "cmd.query = this._query;\n"
- "}\n"
+ "cmd.query = this._query;}\n"
"cmd.fields = this._fields || {};\n"
+ "if ( applySkipLimit ){\n"
+ "if ( this._limit )\n"
+ "cmd.limit = this._limit;\n"
+ "if ( this._skip )\n"
+ "cmd.skip = this._skip;}\n"
"var res = this._db.runCommand( cmd );\n"
"if( res && res.n != null ) return res.n;\n"
- "throw \"count failed: \" + tojson( res );\n"
- "}\n"
+ "throw \"count failed: \" + tojson( res );}\n"
+ "DBQuery.prototype.size = function(){\n"
+ "return this.count( true );}\n"
"DBQuery.prototype.countReturn = function(){\n"
"var c = this.count();\n"
"if ( this._skip )\n"
"c = c - this._skip;\n"
"if ( this._limit > 0 && this._limit < c )\n"
"return this._limit;\n"
- "return c;\n"
- "}\n"
- "/**\n"
- "* iterative count - only for testing\n"
- "*/\n"
+ "return c;}\n"
+ "\n"
"DBQuery.prototype.itcount = function(){\n"
"var num = 0;\n"
"while ( this.hasNext() ){\n"
"num++;\n"
- "this.next();\n"
- "}\n"
- "return num;\n"
- "}\n"
+ "this.next();}\n"
+ "return num;}\n"
"DBQuery.prototype.length = function(){\n"
- "return this.toArray().length;\n"
- "}\n"
- "DBQuery.prototype.sort = function( sortBy ){\n"
+ "return this.toArray().length;}\n"
+ "DBQuery.prototype._addSpecial = function( name , value ){\n"
"this._ensureSpecial();\n"
- "this._query.orderby = sortBy;\n"
- "return this;\n"
- "}\n"
+ "this._query[name] = value;\n"
+ "return this;}\n"
+ "DBQuery.prototype.sort = function( sortBy ){\n"
+ "return this._addSpecial( \"orderby\" , sortBy );}\n"
"DBQuery.prototype.hint = function( hint ){\n"
- "this._ensureSpecial();\n"
- "this._query[\"$hint\"] = hint;\n"
- "return this;\n"
- "}\n"
+ "return this._addSpecial( \"$hint\" , hint );}\n"
"DBQuery.prototype.min = function( min ) {\n"
- "this._ensureSpecial();\n"
- "this._query[\"$min\"] = min;\n"
- "return this;\n"
- "}\n"
+ "return this._addSpecial( \"$min\" , min );}\n"
"DBQuery.prototype.max = function( max ) {\n"
- "this._ensureSpecial();\n"
- "this._query[\"$max\"] = max;\n"
- "return this;\n"
- "}\n"
+ "return this._addSpecial( \"$max\" , max );}\n"
"DBQuery.prototype.forEach = function( func ){\n"
"while ( this.hasNext() )\n"
- "func( this.next() );\n"
- "}\n"
+ "func( this.next() );}\n"
"DBQuery.prototype.map = function( func ){\n"
"var a = [];\n"
"while ( this.hasNext() )\n"
"a.push( func( this.next() ) );\n"
- "return a;\n"
- "}\n"
+ "return a;}\n"
"DBQuery.prototype.arrayAccess = function( idx ){\n"
- "return this.toArray()[idx];\n"
- "}\n"
+ "return this.toArray()[idx];}\n"
"DBQuery.prototype.explain = function(){\n"
"var n = this.clone();\n"
"n._ensureSpecial();\n"
"n._query.$explain = true;\n"
"n._limit = n._limit * -1;\n"
- "return n.next();\n"
- "}\n"
+ "return n.next();}\n"
"DBQuery.prototype.snapshot = function(){\n"
"this._ensureSpecial();\n"
"this._query.$snapshot = true;\n"
- "return this;\n"
- "}\n"
+ "return this;}\n"
"DBQuery.prototype.shellPrint = function(){\n"
"try {\n"
"var n = 0;\n"
"while ( this.hasNext() && n < 20 ){\n"
- "var s = tojson( this.next() );\n"
+ "var s = tojson( this.next() , \"\" , true );\n"
"print( s );\n"
- "n++;\n"
- "}\n"
+ "n++;}\n"
"if ( this.hasNext() ){\n"
"print( \"has more\" );\n"
- "___it___ = this;\n"
- "}\n"
+ "___it___ = this;}\n"
"else {\n"
- "___it___ = null;\n"
- "}\n"
- "}\n"
+ "___it___ = null;}}\n"
"catch ( e ){\n"
- "print( e );\n"
- "}\n"
- "}\n"
+ "print( e );}}\n"
"DBQuery.prototype.toString = function(){\n"
- "return \"DBQuery: \" + this._ns + \" -> \" + tojson( this.query );\n"
- "}\n"
- "_parsePath = function() {\n"
- "var dbpath = \"\";\n"
- "for( var i = 0; i < arguments.length; ++i )\n"
- "if ( arguments[ i ] == \"--dbpath\" )\n"
- "dbpath = arguments[ i + 1 ];\n"
- "if ( dbpath == \"\" )\n"
- "throw \"No dbpath specified\";\n"
- "return dbpath;\n"
- "}\n"
- "_parsePort = function() {\n"
- "var port = \"\";\n"
- "for( var i = 0; i < arguments.length; ++i )\n"
- "if ( arguments[ i ] == \"--port\" )\n"
- "port = arguments[ i + 1 ];\n"
- "if ( port == \"\" )\n"
- "throw \"No port specified\";\n"
- "return port;\n"
- "}\n"
- "createMongoArgs = function( binaryName , args ){\n"
- "var fullArgs = [ binaryName ];\n"
- "if ( args.length == 1 && isObject( args[0] ) ){\n"
- "var o = args[0];\n"
+ "return \"DBQuery: \" + this._ns + \" -> \" + tojson( this.query );}\n"
+ "if ( ( typeof DBCollection ) == \"undefined\" ){\n"
+ "DBCollection = function( mongo , db , shortName , fullName ){\n"
+ "this._mongo = mongo;\n"
+ "this._db = db;\n"
+ "this._shortName = shortName;\n"
+ "this._fullName = fullName;\n"
+ "this.verify();}}\n"
+ "DBCollection.prototype.verify = function(){\n"
+ "assert( this._fullName , \"no fullName\" );\n"
+ "assert( this._shortName , \"no shortName\" );\n"
+ "assert( this._db , \"no db\" );\n"
+ "assert.eq( this._fullName , this._db._name + \".\" + this._shortName , \"name mismatch\" );\n"
+ "assert( this._mongo , \"no mongo in DBCollection\" );}\n"
+ "DBCollection.prototype.getName = function(){\n"
+ "return this._shortName;}\n"
+ "DBCollection.prototype.help = function() {\n"
+ "print(\"DBCollection help\");\n"
+ "print(\"\\tdb.foo.count()\");\n"
+ "print(\"\\tdb.foo.dataSize()\");\n"
+ "print(\"\\tdb.foo.distinct( key ) - eg. db.foo.distinct( 'x' )\");\n"
+ "print(\"\\tdb.foo.drop() drop the collection\");\n"
+ "print(\"\\tdb.foo.dropIndex(name)\");\n"
+ "print(\"\\tdb.foo.dropIndexes()\");\n"
+ "print(\"\\tdb.foo.ensureIndex(keypattern,options) - options should be an object with these possible fields: name, unique, dropDups\");\n"
+ "print(\"\\tdb.foo.reIndex()\");\n"
+ "print(\"\\tdb.foo.find( [query] , [fields]) - first parameter is an optional query filter. second parameter is optional set of fields to return.\");\n"
+ "print(\"\\t e.g. db.foo.find( { x : 77 } , { name : 1 , x : 1 } )\");\n"
+ "print(\"\\tdb.foo.find(...).count()\");\n"
+ "print(\"\\tdb.foo.find(...).limit(n)\");\n"
+ "print(\"\\tdb.foo.find(...).skip(n)\");\n"
+ "print(\"\\tdb.foo.find(...).sort(...)\");\n"
+ "print(\"\\tdb.foo.findOne([query])\");\n"
+ "print(\"\\tdb.foo.findAndModify( { update : ... , remove : bool [, query: {}, sort: {}, 'new': false] } )\");\n"
+ "print(\"\\tdb.foo.getDB() get DB object associated with collection\");\n"
+ "print(\"\\tdb.foo.getIndexes()\");\n"
+ "print(\"\\tdb.foo.group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )\");\n"
+ "print(\"\\tdb.foo.mapReduce( mapFunction , reduceFunction , <optional params> )\");\n"
+ "print(\"\\tdb.foo.remove(query)\");\n"
+ "print(\"\\tdb.foo.renameCollection( newName , <dropTarget> ) renames the collection.\");\n"
+ "print(\"\\tdb.foo.runCommand( name , <options> ) runs a db command with the given name where the 1st param is the colleciton name\" );\n"
+ "print(\"\\tdb.foo.save(obj)\");\n"
+ "print(\"\\tdb.foo.stats()\");\n"
+ "print(\"\\tdb.foo.storageSize() - includes free space allocated to this collection\");\n"
+ "print(\"\\tdb.foo.totalIndexSize() - size in bytes of all the indexes\");\n"
+ "print(\"\\tdb.foo.totalSize() - storage allocated for all data and indexes\");\n"
+ "print(\"\\tdb.foo.update(query, object[, upsert_bool, multi_bool])\");\n"
+ "print(\"\\tdb.foo.validate() - SLOW\");\n"
+ "print(\"\\tdb.foo.getShardVersion() - only for use with sharding\");}\n"
+ "DBCollection.prototype.getFullName = function(){\n"
+ "return this._fullName;}\n"
+ "DBCollection.prototype.getDB = function(){\n"
+ "return this._db;}\n"
+ "DBCollection.prototype._dbCommand = function( cmd , params ){\n"
+ "if ( typeof( cmd ) == \"object\" )\n"
+ "return this._db._dbCommand( cmd );\n"
+ "var c = {};\n"
+ "c[cmd] = this.getName();\n"
+ "if ( params )\n"
+ "Object.extend( c , params );\n"
+ "return this._db._dbCommand( c );}\n"
+ "DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;\n"
+ "DBCollection.prototype._massageObject = function( q ){\n"
+ "if ( ! q )\n"
+ "return {};\n"
+ "var type = typeof q;\n"
+ "if ( type == \"function\" )\n"
+ "return { $where : q };\n"
+ "if ( q.isObjectId )\n"
+ "return { _id : q };\n"
+ "if ( type == \"object\" )\n"
+ "return q;\n"
+ "if ( type == \"string\" ){\n"
+ "if ( q.length == 24 )\n"
+ "return { _id : q };\n"
+ "return { $where : q };}\n"
+ "throw \"don't know how to massage : \" + type;}\n"
+ "DBCollection.prototype._validateObject = function( o ){\n"
+ "if ( o._ensureSpecial && o._checkModify )\n"
+ "throw \"can't save a DBQuery object\";}\n"
+ "DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
+ "DBCollection.prototype._validateForStorage = function( o ){\n"
+ "this._validateObject( o );\n"
"for ( var k in o ){\n"
- "if ( k == \"v\" && isNumber( o[k] ) ){\n"
- "var n = o[k];\n"
- "if ( n > 0 ){\n"
- "var temp = \"-\";\n"
- "while ( n-- > 0 ) temp += \"v\";\n"
- "fullArgs.push( temp );\n"
- "}\n"
- "}\n"
- "else {\n"
- "fullArgs.push( \"--\" + k );\n"
- "if ( o[k] != \"\" )\n"
- "fullArgs.push( \"\" + o[k] );\n"
- "}\n"
- "}\n"
- "}\n"
- "else {\n"
- "for ( var i=0; i<args.length; i++ )\n"
- "fullArgs.push( args[i] )\n"
- "}\n"
- "return fullArgs;\n"
- "}\n"
- "startMongod = function(){\n"
- "var args = createMongoArgs( \"mongod\" , arguments );\n"
- "var dbpath = _parsePath.apply( null, args );\n"
- "resetDbpath( dbpath );\n"
- "return startMongoProgram.apply( null, args );\n"
- "}\n"
- "startMongos = function(){\n"
- "return startMongoProgram.apply( null, createMongoArgs( \"mongos\" , arguments ) );\n"
- "}\n"
- "startMongoProgram = function(){\n"
- "var port = _parsePort.apply( null, arguments );\n"
- "_startMongoProgram.apply( null, arguments );\n"
- "var m;\n"
- "assert.soon\n"
- "( function() {\n"
- "try {\n"
- "m = new Mongo( \"127.0.0.1:\" + port );\n"
- "return true;\n"
- "} catch( e ) {\n"
- "}\n"
- "return false;\n"
- "}, \"unable to connect to mongo program on port \" + port, 30000 );\n"
- "return m;\n"
- "}\n"
- "startMongoProgramNoConnect = function() {\n"
- "return _startMongoProgram.apply( null, arguments );\n"
- "}\n"
- "myPort = function() {\n"
- "var m = db.getMongo();\n"
- "if ( m.host.match( /:/ ) )\n"
- "return m.host.match( /:(.*)/ )[ 1 ];\n"
- "else\n"
- "return 27017;\n"
- "}\n"
- "ShardingTest = function( testName , numServers , verboseLevel , numMongos ){\n"
- "this._connections = [];\n"
- "this._serverNames = [];\n"
- "for ( var i=0; i<numServers; i++){\n"
- "var conn = startMongod( { port : 30000 + i , dbpath : \"/data/db/\" + testName + i , noprealloc : \"\" } );\n"
- "conn.name = \"localhost:\" + ( 30000 + i );\n"
- "this._connections.push( conn );\n"
- "this._serverNames.push( conn.name );\n"
- "}\n"
- "this._configDB = \"localhost:30000\";\n"
- "this._mongos = [];\n"
- "var startMongosPort = 39999;\n"
- "for ( var i=0; i<(numMongos||1); i++ ){\n"
- "var myPort = startMongosPort - i;\n"
- "var conn = startMongos( { port : startMongosPort - i , v : verboseLevel || 0 , configdb : this._configDB } );\n"
- "conn.name = \"localhost:\" + myPort;\n"
- "this._mongos.push( conn );\n"
- "if ( i == 0 )\n"
- "this.s = conn;\n"
- "}\n"
- "var admin = this.admin = this.s.getDB( \"admin\" );\n"
- "this.config = this.s.getDB( \"config\" );\n"
- "this._serverNames.forEach(\n"
- "function(z){\n"
- "admin.runCommand( { addshard : z } );\n"
- "}\n"
- ");\n"
- "}\n"
- "ShardingTest.prototype.getDB = function( name ){\n"
- "return this.s.getDB( name );\n"
- "}\n"
- "ShardingTest.prototype.getServerName = function( dbname ){\n"
- "return this.config.databases.findOne( { name : dbname } ).primary;\n"
- "}\n"
- "ShardingTest.prototype.getServer = function( dbname ){\n"
- "var name = this.getServerName( dbname );\n"
- "for ( var i=0; i<this._serverNames.length; i++ ){\n"
- "if ( name == this._serverNames[i] )\n"
- "return this._connections[i];\n"
- "}\n"
- "throw \"can't find server for: \" + dbname + \" name:\" + name;\n"
- "}\n"
- "ShardingTest.prototype.getOther = function( one ){\n"
- "if ( this._connections.length != 2 )\n"
- "throw \"getOther only works with 2 servers\";\n"
- "if ( this._connections[0] == one )\n"
- "return this._connections[1];\n"
- "return this._connections[0];\n"
- "}\n"
- "ShardingTest.prototype.stop = function(){\n"
- "for ( var i=0; i<this._mongos.length; i++ ){\n"
- "stopMongoProgram( 39999 - i );\n"
- "}\n"
- "for ( var i=0; i<this._connections.length; i++){\n"
- "stopMongod( 30000 + i );\n"
- "}\n"
- "}\n"
- "ShardingTest.prototype.adminCommand = function(cmd){\n"
- "var res = this.admin.runCommand( cmd );\n"
- "if ( res && res.ok == 1 )\n"
- "return true;\n"
- "throw \"command \" + tojson( cmd ) + \" failed: \" + tojson( res );\n"
- "}\n"
- "ShardingTest.prototype.getChunksString = function( ns ){\n"
- "var q = {}\n"
- "if ( ns )\n"
- "q.ns = ns;\n"
- "return Array.tojson( this.config.chunks.find( q ).toArray() , \"\\n\" );\n"
- "}\n"
- "ShardingTest.prototype.printChunks = function( ns ){\n"
- "print( this.getChunksString( ns ) );\n"
- "}\n"
- "ShardingTest.prototype.sync = function(){\n"
- "this.adminCommand( \"connpoolsync\" );\n"
- "}\n"
- "MongodRunner = function( port, dbpath, peer, arbiter, extraArgs ) {\n"
- "this.port_ = port;\n"
- "this.dbpath_ = dbpath;\n"
- "this.peer_ = peer;\n"
- "this.arbiter_ = arbiter;\n"
- "this.extraArgs_ = extraArgs;\n"
- "}\n"
- "MongodRunner.prototype.start = function( reuseData ) {\n"
- "var args = [];\n"
- "if ( reuseData ) {\n"
- "args.push( \"mongod\" );\n"
- "}\n"
- "args.push( \"--port\" );\n"
- "args.push( this.port_ );\n"
- "args.push( \"--dbpath\" );\n"
- "args.push( this.dbpath_ );\n"
- "if ( this.peer_ && this.arbiter_ ) {\n"
- "args.push( \"--pairwith\" );\n"
- "args.push( this.peer_ );\n"
- "args.push( \"--arbiter\" );\n"
- "args.push( this.arbiter_ );\n"
- "args.push( \"--oplogSize\" );\n"
- "args.push( \"1\" );\n"
- "}\n"
- "args.push( \"--nohttpinterface\" );\n"
- "args.push( \"--noprealloc\" );\n"
- "args.push( \"--bind_ip\" );\n"
- "args.push( \"127.0.0.1\" );\n"
- "if ( this.extraArgs_ ) {\n"
- "args = args.concat( this.extraArgs_ );\n"
- "}\n"
- "if ( reuseData ) {\n"
- "return startMongoProgram.apply( null, args );\n"
- "} else {\n"
- "return startMongod.apply( null, args );\n"
- "}\n"
- "}\n"
- "MongodRunner.prototype.port = function() { return this.port_; }\n"
- "MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); }\n"
- "ReplPair = function( left, right, arbiter ) {\n"
- "this.left_ = left;\n"
- "this.leftC_ = null;\n"
- "this.right_ = right;\n"
- "this.rightC_ = null;\n"
- "this.arbiter_ = arbiter;\n"
- "this.arbiterC_ = null;\n"
- "this.master_ = null;\n"
- "this.slave_ = null;\n"
- "}\n"
- "ReplPair.prototype.start = function( reuseData ) {\n"
- "if ( this.arbiterC_ == null ) {\n"
- "this.arbiterC_ = this.arbiter_.start();\n"
- "}\n"
- "if ( this.leftC_ == null ) {\n"
- "this.leftC_ = this.left_.start( reuseData );\n"
- "}\n"
- "if ( this.rightC_ == null ) {\n"
- "this.rightC_ = this.right_.start( reuseData );\n"
- "}\n"
- "}\n"
- "ReplPair.prototype.isMaster = function( mongo, debug ) {\n"
- "var im = mongo.getDB( \"admin\" ).runCommand( { ismaster : 1 } );\n"
- "assert( im && im.ok, \"command ismaster failed\" );\n"
- "if ( debug ) {\n"
- "printjson( im );\n"
- "}\n"
- "return im.ismaster;\n"
- "}\n"
- "ReplPair.prototype.isInitialSyncComplete = function( mongo, debug ) {\n"
- "var isc = mongo.getDB( \"admin\" ).runCommand( { isinitialsynccomplete : 1 } );\n"
- "assert( isc && isc.ok, \"command isinitialsynccomplete failed\" );\n"
- "if ( debug ) {\n"
- "printjson( isc );\n"
- "}\n"
- "return isc.initialsynccomplete;\n"
- "}\n"
- "ReplPair.prototype.checkSteadyState = function( state, expectedMasterHost, twoMasterOk, leftValues, rightValues, debug ) {\n"
- "leftValues = leftValues || {};\n"
- "rightValues = rightValues || {};\n"
- "var lm = null;\n"
- "var lisc = null;\n"
- "if ( this.leftC_ != null ) {\n"
- "lm = this.isMaster( this.leftC_, debug );\n"
- "leftValues[ lm ] = true;\n"
- "lisc = this.isInitialSyncComplete( this.leftC_, debug );\n"
- "}\n"
- "var rm = null;\n"
- "var risc = null;\n"
- "if ( this.rightC_ != null ) {\n"
- "rm = this.isMaster( this.rightC_, debug );\n"
- "rightValues[ rm ] = true;\n"
- "risc = this.isInitialSyncComplete( this.rightC_, debug );\n"
- "}\n"
- "var stateSet = {}\n"
- "state.forEach( function( i ) { stateSet[ i ] = true; } );\n"
- "if ( !( 1 in stateSet ) || ( ( risc || risc == null ) && ( lisc || lisc == null ) ) ) {\n"
- "if ( rm == 1 && lm != 1 ) {\n"
- "assert( twoMasterOk || !( 1 in leftValues ) );\n"
- "this.master_ = this.rightC_;\n"
- "this.slave_ = this.leftC_;\n"
- "} else if ( lm == 1 && rm != 1 ) {\n"
- "assert( twoMasterOk || !( 1 in rightValues ) );\n"
- "this.master_ = this.leftC_;\n"
- "this.slave_ = this.rightC_;\n"
- "}\n"
- "if ( !twoMasterOk ) {\n"
- "assert( lm != 1 || rm != 1, \"two masters\" );\n"
- "}\n"
- "if ( state.sort().toString() == [ lm, rm ].sort().toString() ) {\n"
- "if ( expectedMasterHost != null ) {\n"
- "if( expectedMasterHost == this.master_.host ) {\n"
- "return true;\n"
- "}\n"
- "} else {\n"
- "return true;\n"
- "}\n"
- "}\n"
- "}\n"
- "this.master_ = null;\n"
- "this.slave_ = null;\n"
- "return false;\n"
- "}\n"
- "ReplPair.prototype.waitForSteadyState = function( state, expectedMasterHost, twoMasterOk, debug ) {\n"
- "state = state || [ 1, 0 ];\n"
- "twoMasterOk = twoMasterOk || false;\n"
- "var rp = this;\n"
- "var leftValues = {};\n"
- "var rightValues = {};\n"
- "assert.soon( function() { return rp.checkSteadyState( state, expectedMasterHost, twoMasterOk, leftValues, rightValues, debug ); },\n"
- "\"rp (\" + rp + \") failed to reach expected steady state (\" + state + \")\" );\n"
- "}\n"
- "ReplPair.prototype.master = function() { return this.master_; }\n"
- "ReplPair.prototype.slave = function() { return this.slave_; }\n"
- "ReplPair.prototype.right = function() { return this.rightC_; }\n"
- "ReplPair.prototype.left = function() { return this.leftC_; }\n"
- "ReplPair.prototype.killNode = function( mongo, signal ) {\n"
- "signal = signal || 15;\n"
- "if ( this.leftC_ != null && this.leftC_.host == mongo.host ) {\n"
- "stopMongod( this.left_.port_ );\n"
- "this.leftC_ = null;\n"
- "}\n"
- "if ( this.rightC_ != null && this.rightC_.host == mongo.host ) {\n"
- "stopMongod( this.right_.port_ );\n"
- "this.rightC_ = null;\n"
- "}\n"
- "}\n"
- "ReplPair.prototype._annotatedNode = function( mongo ) {\n"
- "var ret = \"\";\n"
- "if ( mongo != null ) {\n"
- "ret += \" (connected)\";\n"
- "if ( this.master_ != null && mongo.host == this.master_.host ) {\n"
- "ret += \"(master)\";\n"
- "}\n"
- "if ( this.slave_ != null && mongo.host == this.slave_.host ) {\n"
- "ret += \"(slave)\";\n"
- "}\n"
- "}\n"
- "return ret;\n"
- "}\n"
- "ReplPair.prototype.toString = function() {\n"
- "var ret = \"\";\n"
- "ret += \"left: \" + this.left_;\n"
- "ret += \" \" + this._annotatedNode( this.leftC_ );\n"
- "ret += \" right: \" + this.right_;\n"
- "ret += \" \" + this._annotatedNode( this.rightC_ );\n"
- "return ret;\n"
- "}\n"
- "friendlyEqual = function( a , b ){\n"
- "if ( a == b )\n"
- "return true;\n"
- "if ( tojson( a ) == tojson( b ) )\n"
- "return true;\n"
- "return false;\n"
- "}\n"
- "doassert = function( msg ){\n"
- "print( \"assert: \" + msg );\n"
- "throw msg;\n"
- "}\n"
- "assert = function( b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( b )\n"
- "return;\n"
- "doassert( \"assert failed : \" + msg );\n"
- "}\n"
- "assert._debug = false;\n"
- "assert.eq = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a == b )\n"
- "return;\n"
- "if ( ( a != null && b != null ) && friendlyEqual( a , b ) )\n"
- "return;\n"
- "doassert( \"[\" + tojson( a ) + \"] != [\" + tojson( b ) + \"] are not equal : \" + msg );\n"
- "}\n"
- "assert.neq = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a != b )\n"
- "return;\n"
- "doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );\n"
- "}\n"
- "assert.soon = function( f, msg, timeout, interval ) {\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "var start = new Date();\n"
- "timeout = timeout || 30000;\n"
- "interval = interval || 200;\n"
- "var last;\n"
- "while( 1 ) {\n"
- "if ( typeof( f ) == \"string\" ){\n"
- "if ( eval( f ) )\n"
- "return;\n"
- "}\n"
+ "if ( k.indexOf( \".\" ) >= 0 ) {\n"
+ "throw \"can't have . in field names [\" + k + \"]\" ;}\n"
+ "if ( k.indexOf( \"$\" ) == 0 && ! DBCollection._allowedFields[k] ) {\n"
+ "throw \"field names cannot start with $ [\" + k + \"]\";}\n"
+ "if ( o[k] !== null && typeof( o[k] ) === \"object\" ) {\n"
+ "this._validateForStorage( o[k] );}}\n"
+ "};\n"
+ "DBCollection.prototype.find = function( query , fields , limit , skip ){\n"
+ "return new DBQuery( this._mongo , this._db , this ,\n"
+ "this._fullName , this._massageObject( query ) , fields , limit , skip );}\n"
+ "DBCollection.prototype.findOne = function( query , fields ){\n"
+ "var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 , 0 );\n"
+ "if ( ! cursor.hasNext() )\n"
+ "return null;\n"
+ "var ret = cursor.next();\n"
+ "if ( cursor.hasNext() ) throw \"findOne has more than 1 result!\";\n"
+ "if ( ret.$err )\n"
+ "throw \"error \" + tojson( ret );\n"
+ "return ret;}\n"
+ "DBCollection.prototype.insert = function( obj , _allow_dot ){\n"
+ "if ( ! obj )\n"
+ "throw \"no object passed to insert!\";\n"
+ "if ( ! _allow_dot ) {\n"
+ "this._validateForStorage( obj );}\n"
+ "if ( typeof( obj._id ) == \"undefined\" ){\n"
+ "var tmp = obj;\n"
+ "obj = {_id: new ObjectId()};\n"
+ "for (var key in tmp){\n"
+ "obj[key] = tmp[key];}}\n"
+ "this._mongo.insert( this._fullName , obj );\n"
+ "return obj._id;}\n"
+ "DBCollection.prototype.remove = function( t ){\n"
+ "this._mongo.remove( this._fullName , this._massageObject( t ) );}\n"
+ "DBCollection.prototype.update = function( query , obj , upsert , multi ){\n"
+ "assert( query , \"need a query\" );\n"
+ "assert( obj , \"need an object\" );\n"
+ "this._validateObject( obj );\n"
+ "this._mongo.update( this._fullName , query , obj , upsert ? true : false , multi ? true : false );}\n"
+ "DBCollection.prototype.save = function( obj ){\n"
+ "if ( obj == null || typeof( obj ) == \"undefined\" )\n"
+ "throw \"can't save a null\";\n"
+ "if ( typeof( obj._id ) == \"undefined\" ){\n"
+ "obj._id = new ObjectId();\n"
+ "return this.insert( obj );}\n"
"else {\n"
- "if ( f() )\n"
- "return;\n"
- "}\n"
- "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
- "doassert( \"assert.soon failed: \" + f + \", msg:\" + msg );\n"
- "sleep( interval );\n"
- "}\n"
- "}\n"
- "assert.throws = function( func , params , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "try {\n"
- "func.apply( null , params );\n"
- "}\n"
- "catch ( e ){\n"
- "return e;\n"
- "}\n"
- "doassert( \"did not throw exception: \" + msg );\n"
- "}\n"
- "assert.commandWorked = function( res , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( res.ok == 1 )\n"
- "return;\n"
- "doassert( \"command failed: \" + tojson( res ) + \" : \" + msg );\n"
- "}\n"
- "assert.commandFailed = function( res , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( res.ok == 0 )\n"
- "return;\n"
- "doassert( \"command worked when it should have failed: \" + tojson( res ) + \" : \" + msg );\n"
- "}\n"
- "assert.isnull = function( what , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( what == null )\n"
- "return;\n"
- "doassert( \"supposed to null (\" + ( msg || \"\" ) + \") was: \" + tojson( what ) );\n"
- "}\n"
- "assert.lt = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a < b )\n"
- "return;\n"
- "doassert( a + \" is not less than \" + b + \" : \" + msg );\n"
- "}\n"
- "assert.gt = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a > b )\n"
- "return;\n"
- "doassert( a + \" is not greater than \" + b + \" : \" + msg );\n"
- "}\n"
- "Object.extend = function( dst , src ){\n"
- "for ( var k in src ){\n"
- "dst[k] = src[k];\n"
- "}\n"
- "return dst;\n"
- "}\n"
- "argumentsToArray = function( a ){\n"
- "var arr = [];\n"
- "for ( var i=0; i<a.length; i++ )\n"
- "arr[i] = a[i];\n"
- "return arr;\n"
- "}\n"
- "isString = function( x ){\n"
- "return typeof( x ) == \"string\";\n"
- "}\n"
- "isNumber = function(x){\n"
- "return typeof( x ) == \"number\";\n"
- "}\n"
- "isObject = function( x ){\n"
- "return typeof( x ) == \"object\";\n"
- "}\n"
- "String.prototype.trim = function() {\n"
- "return this.replace(/^\\s+|\\s+$/g,\"\");\n"
- "}\n"
- "String.prototype.ltrim = function() {\n"
- "return this.replace(/^\\s+/,\"\");\n"
- "}\n"
- "String.prototype.rtrim = function() {\n"
- "return this.replace(/\\s+$/,\"\");\n"
- "}\n"
- "Date.timeFunc = function( theFunc , numTimes ){\n"
- "var start = new Date();\n"
- "numTimes = numTimes || 1;\n"
- "for ( var i=0; i<numTimes; i++ ){\n"
- "theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );\n"
- "}\n"
- "return (new Date()).getTime() - start.getTime();\n"
- "}\n"
- "Date.prototype.tojson = function(){\n"
- "return \"\\\"\" + this.toString() + \"\\\"\";\n"
- "}\n"
- "RegExp.prototype.tojson = RegExp.prototype.toString;\n"
- "Array.contains = function( a , x ){\n"
- "for ( var i=0; i<a.length; i++ ){\n"
- "if ( a[i] == x )\n"
- "return true;\n"
- "}\n"
- "return false;\n"
- "}\n"
- "Array.unique = function( a ){\n"
- "var u = [];\n"
- "for ( var i=0; i<a.length; i++){\n"
- "var o = a[i];\n"
- "if ( ! Array.contains( u , o ) ){\n"
- "u.push( o );\n"
- "}\n"
- "}\n"
- "return u;\n"
- "}\n"
- "Array.shuffle = function( arr ){\n"
- "for ( var i=0; i<arr.length-1; i++ ){\n"
- "var pos = i+Math.floor(Math.random()*(arr.length-i));\n"
- "var save = arr[i];\n"
- "arr[i] = arr[pos];\n"
- "arr[pos] = save;\n"
- "}\n"
- "return arr;\n"
- "}\n"
- "Array.tojson = function( a , sepLines ){\n"
- "var s = \"[\";\n"
- "if ( sepLines ) s += \"\\n\";\n"
- "for ( var i=0; i<a.length; i++){\n"
- "if ( i > 0 ){\n"
- "s += \",\";\n"
- "if ( sepLines ) s += \"\\n\";\n"
- "}\n"
- "s += tojson( a[i] );\n"
- "}\n"
- "s += \"]\";\n"
- "if ( sepLines ) s += \"\\n\";\n"
- "return s;\n"
- "}\n"
- "Array.fetchRefs = function( arr , coll ){\n"
- "var n = [];\n"
- "for ( var i=0; i<arr.length; i ++){\n"
- "var z = arr[i];\n"
- "if ( coll && coll != z.getCollection() )\n"
+ "return this.update( { _id : obj._id } , obj , true );}}\n"
+ "DBCollection.prototype._genIndexName = function( keys ){\n"
+ "var name = \"\";\n"
+ "for ( var k in keys ){\n"
+ "var v = keys[k];\n"
+ "if ( typeof v == \"function\" )\n"
"continue;\n"
- "n.push( z.fetch() );\n"
- "}\n"
- "return n;\n"
- "}\n"
- "if ( ! ObjectId.prototype )\n"
- "ObjectId.prototype = {}\n"
- "ObjectId.prototype.toString = function(){\n"
- "return this.str;\n"
- "}\n"
- "ObjectId.prototype.tojson = function(){\n"
- "return \" ObjectId( \\\"\" + this.str + \"\\\") \";\n"
- "}\n"
- "ObjectId.prototype.isObjectId = true;\n"
- "if ( typeof( DBPointer ) != \"undefined\" ){\n"
- "DBPointer.prototype.fetch = function(){\n"
- "assert( this.ns , \"need a ns\" );\n"
- "assert( this.id , \"need an id\" );\n"
- "return db[ this.ns ].findOne( { _id : this.id } );\n"
- "}\n"
- "DBPointer.prototype.tojson = function(){\n"
- "return \"{ 'ns' : \\\"\" + this.ns + \"\\\" , 'id' : \\\"\" + this.id + \"\\\" } \";\n"
- "}\n"
- "DBPointer.prototype.getCollection = function(){\n"
- "return this.ns;\n"
- "}\n"
- "DBPointer.prototype.toString = function(){\n"
- "return \"DBPointer \" + this.ns + \":\" + this.id;\n"
- "}\n"
- "}\n"
- "else {\n"
- "print( \"warning: no DBPointer\" );\n"
- "}\n"
- "if ( typeof( DBRef ) != \"undefined\" ){\n"
- "DBRef.prototype.fetch = function(){\n"
- "assert( this.$ref , \"need a ns\" );\n"
- "assert( this.$id , \"need an id\" );\n"
- "return db[ this.$ref ].findOne( { _id : this.$id } );\n"
- "}\n"
- "DBRef.prototype.tojson = function(){\n"
- "return \"{ '$ref' : \\\"\" + this.$ref + \"\\\" , '$id' : \\\"\" + this.$id + \"\\\" } \";\n"
- "}\n"
- "DBRef.prototype.getCollection = function(){\n"
- "return this.$ref;\n"
- "}\n"
- "DBRef.prototype.toString = function(){\n"
- "return this.tojson();\n"
- "}\n"
- "}\n"
+ "if ( name.length > 0 )\n"
+ "name += \"_\";\n"
+ "name += k + \"_\";\n"
+ "if ( typeof v == \"number\" )\n"
+ "name += v;}\n"
+ "return name;}\n"
+ "DBCollection.prototype._indexSpec = function( keys, options ) {\n"
+ "var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };\n"
+ "if ( ! options ){}\n"
+ "else if ( typeof ( options ) == \"string\" )\n"
+ "ret.name = options;\n"
+ "else if ( typeof ( options ) == \"boolean\" )\n"
+ "ret.unique = true;\n"
+ "else if ( typeof ( options ) == \"object\" ){\n"
+ "if ( options.length ){\n"
+ "var nb = 0;\n"
+ "for ( var i=0; i<options.length; i++ ){\n"
+ "if ( typeof ( options[i] ) == \"string\" )\n"
+ "ret.name = options[i];\n"
+ "else if ( typeof( options[i] ) == \"boolean\" ){\n"
+ "if ( options[i] ){\n"
+ "if ( nb == 0 )\n"
+ "ret.unique = true;\n"
+ "if ( nb == 1 )\n"
+ "ret.dropDups = true;}\n"
+ "nb++;}}}\n"
"else {\n"
- "print( \"warning: no DBRef\" );\n"
- "}\n"
- "if ( typeof( BinData ) != \"undefined\" ){\n"
- "BinData.prototype.tojson = function(){\n"
- "return \"BinData type: \" + this.type + \" len: \" + this.len;\n"
- "}\n"
- "}\n"
+ "Object.extend( ret , options );}}\n"
"else {\n"
- "print( \"warning: no BinData\" );\n"
- "}\n"
- "tojson = function( x ){\n"
- "if ( x == null )\n"
- "return \"null\";\n"
- "if ( x == undefined )\n"
- "return \"\";\n"
- "switch ( typeof x ){\n"
- "case \"string\": {\n"
- "var s = \"\\\"\";\n"
- "for ( var i=0; i<x.length; i++ ){\n"
- "if ( x[i] == '\"' ){\n"
- "s += \"\\\\\\\"\";\n"
- "}\n"
- "else\n"
- "s += x[i];\n"
- "}\n"
- "return s + \"\\\"\";\n"
- "}\n"
- "case \"number\":\n"
- "case \"boolean\":\n"
- "return \"\" + x;\n"
- "case \"object\":\n"
- "return tojsonObject( x );\n"
- "case \"function\":\n"
- "return x.toString();\n"
- "default:\n"
- "throw \"tojson can't handle type \" + ( typeof x );\n"
- "}\n"
- "}\n"
- "tojsonObject = function( x ){\n"
- "assert.eq( ( typeof x ) , \"object\" , \"tojsonObject needs object, not [\" + ( typeof x ) + \"]\" );\n"
- "if ( typeof( x.tojson ) == \"function\" && x.tojson != tojson )\n"
- "return x.tojson();\n"
- "if ( typeof( x.constructor.tojson ) == \"function\" && x.constructor.tojson != tojson )\n"
- "return x.constructor.tojson( x );\n"
- "if ( x.toString() == \"[object MaxKey]\" )\n"
- "return \"{ $maxKey : 1 }\";\n"
- "if ( x.toString() == \"[object MinKey]\" )\n"
- "return \"{ $minKey : 1 }\";\n"
- "var s = \"{\";\n"
- "var first = true;\n"
- "for ( var k in x ){\n"
- "var val = x[k];\n"
- "if ( val == DB.prototype || val == DBCollection.prototype )\n"
- "continue;\n"
- "if ( first ) first = false;\n"
- "else s += \" , \";\n"
- "s += \"\\\"\" + k + \"\\\" : \" + tojson( val );\n"
- "}\n"
- "return s + \"}\";\n"
- "}\n"
- "shellPrint = function( x ){\n"
- "it = x;\n"
- "if ( x != undefined )\n"
- "shellPrintHelper( x );\n"
- "if ( db ){\n"
- "var e = db.getPrevError();\n"
- "if ( e.err ) {\n"
- "if( e.nPrev <= 1 )\n"
- "print( \"error on last call: \" + tojson( e.err ) );\n"
- "else\n"
- "print( \"an error \" + tojson(e.err) + \" occurred \" + e.nPrev + \" operations back in the command invocation\" );\n"
- "}\n"
- "db.resetError();\n"
- "}\n"
- "}\n"
- "printjson = function(x){\n"
- "print( tojson( x ) );\n"
- "}\n"
- "shellPrintHelper = function( x ){\n"
- "if ( typeof( x ) == \"undefined\" ){\n"
- "if ( typeof( db ) != \"undefined\" && db.getLastError ){\n"
- "var e = db.getLastError();\n"
- "if ( e != null )\n"
- "print( e );\n"
- "}\n"
- "return;\n"
- "}\n"
- "if ( x == null ){\n"
- "print( \"null\" );\n"
- "return;\n"
- "}\n"
- "if ( typeof x != \"object\" )\n"
- "return print( x );\n"
- "var p = x.shellPrint;\n"
- "if ( typeof p == \"function\" )\n"
- "return x.shellPrint();\n"
- "var p = x.tojson;\n"
- "if ( typeof p == \"function\" )\n"
- "print( x.tojson() );\n"
- "else\n"
- "print( tojson( x ) );\n"
- "}\n"
- "shellHelper = function( command , rest , shouldPrint ){\n"
- "command = command.trim();\n"
- "var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
- "if ( ! shellHelper[command] )\n"
- "throw \"no command [\" + command + \"]\";\n"
- "var res = shellHelper[command].apply( null , args );\n"
- "if ( shouldPrint ){\n"
- "shellPrintHelper( res );\n"
- "}\n"
+ "throw \"can't handle: \" + typeof( options );}\n"
+ "/*\n"
+ "return ret;\n"
+ "var name;\n"
+ "var nTrue = 0;\n"
+ "if ( ! isObject( options ) ) {\n"
+ "options = [ options ];}\n"
+ "if ( options.length ){\n"
+ "for( var i = 0; i < options.length; ++i ) {\n"
+ "var o = options[ i ];\n"
+ "if ( isString( o ) ) {\n"
+ "ret.name = o;\n"
+ "} else if ( typeof( o ) == \"boolean\" ) {\n"
+ "if ( o ) {\n"
+ "++nTrue;}}}\n"
+ "if ( nTrue > 0 ) {\n"
+ "ret.unique = true;}\n"
+ "if ( nTrue > 1 ) {\n"
+ "ret.dropDups = true;}}\n"
+ "*/\n"
+ "return ret;}\n"
+ "DBCollection.prototype.createIndex = function( keys , options ){\n"
+ "var o = this._indexSpec( keys, options );\n"
+ "this._db.getCollection( \"system.indexes\" ).insert( o , true );}\n"
+ "DBCollection.prototype.ensureIndex = function( keys , options ){\n"
+ "var name = this._indexSpec( keys, options ).name;\n"
+ "this._indexCache = this._indexCache || {};\n"
+ "if ( this._indexCache[ name ] ){\n"
+ "return;}\n"
+ "this.createIndex( keys , options );\n"
+ "if ( this.getDB().getLastError() == \"\" ) {\n"
+ "this._indexCache[name] = true;}}\n"
+ "DBCollection.prototype.resetIndexCache = function(){\n"
+ "this._indexCache = {};}\n"
+ "DBCollection.prototype.reIndex = function() {\n"
+ "return this._db.runCommand({ reIndex: this.getName() });}\n"
+ "DBCollection.prototype.dropIndexes = function(){\n"
+ "this.resetIndexCache();\n"
+ "var res = this._db.runCommand( { deleteIndexes: this.getName(), index: \"*\" } );\n"
+ "assert( res , \"no result from dropIndex result\" );\n"
+ "if ( res.ok )\n"
"return res;\n"
- "}\n"
- "help = shellHelper.help = function(){\n"
- "print( \"HELP\" );\n"
- "print( \"\\t\" + \"show dbs show database names\");\n"
- "print( \"\\t\" + \"show collections show collections in current database\");\n"
- "print( \"\\t\" + \"show users show users in current database\");\n"
- "print( \"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
- "print( \"\\t\" + \"use <db name> set curent database to <db name>\" );\n"
- "print( \"\\t\" + \"db.help() help on DB methods\");\n"
- "print( \"\\t\" + \"db.foo.help() help on collection methods\");\n"
- "print( \"\\t\" + \"db.foo.find() list objects in collection foo\" );\n"
- "print( \"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\" );\n"
- "print( \"\\t\" + \"it result of the last line evaluated; use to further iterate\");\n"
- "}\n"
- "shellHelper.use = function( dbname ){\n"
- "db = db.getMongo().getDB( dbname );\n"
- "print( \"switched to db \" + db.getName() );\n"
- "}\n"
- "shellHelper.it = function(){\n"
- "if ( typeof( ___it___ ) == \"undefined\" || ___it___ == null ){\n"
- "print( \"no cursor\" );\n"
- "return;\n"
- "}\n"
- "shellPrintHelper( ___it___ );\n"
- "}\n"
- "shellHelper.show = function( what ){\n"
- "assert( typeof what == \"string\" );\n"
- "if( what == \"profile\" ) {\n"
- "if( db.system.profile.count() == 0 ) {\n"
- "print(\"db.system.profile is empty\");\n"
- "print(\"Use db.setProfilingLevel(2) will enable profiling\");\n"
- "print(\"Use db.system.profile.find() to show raw profile entries\");\n"
- "}\n"
- "else {\n"
- "print();\n"
- "db.system.profile.find({ millis : { $gt : 0 } }).sort({$natural:-1}).limit(5).forEach( function(x){print(\"\"+x.millis+\"ms \" + String(x.ts).substring(0,24)); print(x.info); print(\"\\n\");} )\n"
- "}\n"
- "return \"\";\n"
- "}\n"
- "if ( what == \"users\" ){\n"
- "db.system.users.find().forEach( printjson );\n"
- "return \"\";\n"
- "}\n"
- "if ( what == \"collections\" || what == \"tables\" ) {\n"
- "db.getCollectionNames().forEach( function(x){print(x)} );\n"
- "return \"\";\n"
- "}\n"
- "if ( what == \"dbs\" ) {\n"
- "db.getMongo().getDBNames().sort().forEach( function(x){print(x)} );\n"
- "return \"\";\n"
- "}\n"
- "throw \"don't know how to show [\" + what + \"]\";\n"
- "}\n"
- "if ( typeof( Map ) == \"undefined\" ){\n"
- "Map = function(){\n"
- "this._data = {};\n"
- "}\n"
- "}\n"
- "Map.hash = function( val ){\n"
- "if ( ! val )\n"
- "return val;\n"
- "switch ( typeof( val ) ){\n"
- "case 'string':\n"
- "case 'number':\n"
- "case 'date':\n"
- "return val.toString();\n"
- "case 'object':\n"
- "case 'array':\n"
- "var s = \"\";\n"
- "for ( var k in val ){\n"
- "s += k + val[k];\n"
- "}\n"
- "return s;\n"
- "}\n"
- "throw \"can't hash : \" + typeof( val );\n"
- "}\n"
- "Map.prototype.put = function( key , value ){\n"
- "var o = this._get( key );\n"
- "var old = o.value;\n"
- "o.value = value;\n"
- "return old;\n"
- "}\n"
- "Map.prototype.get = function( key ){\n"
- "return this._get( key ).value;\n"
- "}\n"
- "Map.prototype._get = function( key ){\n"
- "var h = Map.hash( key );\n"
- "var a = this._data[h];\n"
- "if ( ! a ){\n"
- "a = [];\n"
- "this._data[h] = a;\n"
- "}\n"
- "for ( var i=0; i<a.length; i++ ){\n"
- "if ( friendlyEqual( key , a[i].key ) ){\n"
- "return a[i];\n"
- "}\n"
- "}\n"
- "var o = { key : key , value : null };\n"
- "a.push( o );\n"
- "return o;\n"
- "}\n"
- "Map.prototype.values = function(){\n"
- "var all = [];\n"
- "for ( var k in this._data ){\n"
- "this._data[k].forEach( function(z){ all.push( z.value ); } );\n"
- "}\n"
- "return all;\n"
- "}\n"
- "Math.sigFig = function( x , N ){\n"
- "if ( ! N ){\n"
- "N = 3;\n"
- "}\n"
- "var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );\n"
- "return Math.round(x*p)/p;\n"
- "}\n"
+ "if ( res.errmsg.match( /not found/ ) )\n"
+ "return res;\n"
+ "throw \"error dropping indexes : \" + tojson( res );}\n"
+ "DBCollection.prototype.drop = function(){\n"
+ "this.resetIndexCache();\n"
+ "var ret = this._db.runCommand( { drop: this.getName() } );\n"
+ "if ( ! ret.ok ){\n"
+ "if ( ret.errmsg == \"ns not found\" )\n"
+ "return false;\n"
+ "throw \"drop failed: \" + tojson( ret );}\n"
+ "return true;}\n"
+ "DBCollection.prototype.findAndModify = function(args){\n"
+ "var cmd = { findandmodify: this.getName() };\n"
+ "for (var key in args){\n"
+ "cmd[key] = args[key];}\n"
+ "var ret = this._db.runCommand( cmd );\n"
+ "if ( ! ret.ok ){\n"
+ "if (ret.errmsg == \"No matching object found\"){\n"
+ "return {};}\n"
+ "throw \"findAndModifyFailed failed: \" + tojson( ret.errmsg );}\n"
+ "return ret.value;}\n"
+ "DBCollection.prototype.renameCollection = function( newName , dropTarget ){\n"
+ "return this._db._adminCommand( { renameCollection : this._fullName ,\n"
+ "to : this._db._name + \".\" + newName ,\n"
+ "dropTarget : dropTarget } )}\n"
+ "DBCollection.prototype.validate = function() {\n"
+ "var res = this._db.runCommand( { validate: this.getName() } );\n"
+ "res.valid = false;\n"
+ "if ( res.result ){\n"
+ "var str = \"-\" + tojson( res.result );\n"
+ "res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );\n"
+ "var p = /lastExtentSize:(\\d+)/;\n"
+ "var r = p.exec( str );\n"
+ "if ( r ){\n"
+ "res.lastExtentSize = Number( r[1] );}}\n"
+ "return res;}\n"
+ "DBCollection.prototype.getShardVersion = function(){\n"
+ "return this._db._adminCommand( { getShardVersion : this._fullName } );}\n"
+ "DBCollection.prototype.getIndexes = function(){\n"
+ "return this.getDB().getCollection( \"system.indexes\" ).find( { ns : this.getFullName() } ).toArray();}\n"
+ "DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;\n"
+ "DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;\n"
+ "DBCollection.prototype.getIndexKeys = function(){\n"
+ "return this.getIndexes().map(\n"
+ "function(i){\n"
+ "return i.key;}\n"
+ ");}\n"
+ "DBCollection.prototype.count = function( x ){\n"
+ "return this.find( x ).count();}\n"
+ "\n"
+ "DBCollection.prototype.clean = function() {\n"
+ "return this._dbCommand( { clean: this.getName() } );}\n"
+ "\n"
+ "DBCollection.prototype.dropIndex = function(index) {\n"
+ "assert(index , \"need to specify index to dropIndex\" );\n"
+ "if ( ! isString( index ) && isObject( index ) )\n"
+ "index = this._genIndexName( index );\n"
+ "var res = this._dbCommand( \"deleteIndexes\" ,{ index: index } );\n"
+ "this.resetIndexCache();\n"
+ "return res;}\n"
+ "DBCollection.prototype.copyTo = function( newName ){\n"
+ "return this.getDB().eval(\n"
+ "function( collName , newName ){\n"
+ "var from = db[collName];\n"
+ "var to = db[newName];\n"
+ "to.ensureIndex( { _id : 1 } );\n"
+ "var count = 0;\n"
+ "var cursor = from.find();\n"
+ "while ( cursor.hasNext() ){\n"
+ "var o = cursor.next();\n"
+ "count++;\n"
+ "to.save( o );}\n"
+ "return count;\n"
+ "} , this.getName() , newName\n"
+ ");}\n"
+ "DBCollection.prototype.getCollection = function( subName ){\n"
+ "return this._db.getCollection( this._shortName + \".\" + subName );}\n"
+ "DBCollection.prototype.stats = function( scale ){\n"
+ "return this._db.runCommand( { collstats : this._shortName , scale : scale } );}\n"
+ "DBCollection.prototype.dataSize = function(){\n"
+ "return this.stats().size;}\n"
+ "DBCollection.prototype.storageSize = function(){\n"
+ "return this.stats().storageSize;}\n"
+ "DBCollection.prototype.totalIndexSize = function( verbose ){\n"
+ "var stats = this.stats();\n"
+ "if (verbose){\n"
+ "for (var ns in stats.indexSizes){\n"
+ "print( ns + \"\\t\" + stats.indexSizes[ns] );}}\n"
+ "return stats.totalIndexSize;}\n"
+ "DBCollection.prototype.totalSize = function(){\n"
+ "var total = this.storageSize();\n"
+ "var mydb = this._db;\n"
+ "var shortName = this._shortName;\n"
+ "this.getIndexes().forEach(\n"
+ "function( spec ){\n"
+ "var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
+ "var mysize = coll.storageSize();\n"
+ "total += coll.dataSize();}\n"
+ ");\n"
+ "return total;}\n"
+ "DBCollection.prototype.convertToCapped = function( bytes ){\n"
+ "if ( ! bytes )\n"
+ "throw \"have to specify # of bytes\";\n"
+ "return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )}\n"
+ "DBCollection.prototype.exists = function(){\n"
+ "return this._db.system.namespaces.findOne( { name : this._fullName } );}\n"
+ "DBCollection.prototype.isCapped = function(){\n"
+ "var e = this.exists();\n"
+ "return ( e && e.options && e.options.capped ) ? true : false;}\n"
+ "DBCollection.prototype.distinct = function( keyString , query ){\n"
+ "var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
+ "if ( ! res.ok )\n"
+ "throw \"distinct failed: \" + tojson( res );\n"
+ "return res.values;}\n"
+ "DBCollection.prototype.group = function( params ){\n"
+ "params.ns = this._shortName;\n"
+ "return this._db.group( params );}\n"
+ "DBCollection.prototype.groupcmd = function( params ){\n"
+ "params.ns = this._shortName;\n"
+ "return this._db.groupcmd( params );}\n"
+ "MapReduceResult = function( db , o ){\n"
+ "Object.extend( this , o );\n"
+ "this._o = o;\n"
+ "this._keys = Object.keySet( o );\n"
+ "this._db = db;\n"
+ "this._coll = this._db.getCollection( this.result );}\n"
+ "MapReduceResult.prototype._simpleKeys = function(){\n"
+ "return this._o;}\n"
+ "MapReduceResult.prototype.find = function(){\n"
+ "return DBCollection.prototype.find.apply( this._coll , arguments );}\n"
+ "MapReduceResult.prototype.drop = function(){\n"
+ "return this._coll.drop();}\n"
+ "\n"
+ "MapReduceResult.prototype.convertToSingleObject = function(){\n"
+ "var z = {};\n"
+ "this._coll.find().forEach( function(a){ z[a._id] = a.value; } );\n"
+ "return z;}\n"
+ "\n"
+ "DBCollection.prototype.mapReduce = function( map , reduce , optional ){\n"
+ "var c = { mapreduce : this._shortName , map : map , reduce : reduce };\n"
+ "if ( optional )\n"
+ "Object.extend( c , optional );\n"
+ "var raw = this._db.runCommand( c );\n"
+ "if ( ! raw.ok )\n"
+ "throw \"map reduce failed: \" + tojson( raw );\n"
+ "return new MapReduceResult( this._db , raw );}\n"
+ "DBCollection.prototype.toString = function(){\n"
+ "return this.getFullName();}\n"
+ "DBCollection.prototype.toString = function(){\n"
+ "return this.getFullName();}\n"
+ "DBCollection.prototype.tojson = DBCollection.prototype.toString;\n"
+ "DBCollection.prototype.shellPrint = DBCollection.prototype.toString;\n"
;
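
The DBCollection index helpers embedded above accept several option shapes via _indexSpec; a minimal sketch of each form, with illustrative collection and field names:

    db.users.ensureIndex( { age : 1 } );                    // named "age_1" by _genIndexName
    db.users.ensureIndex( { age : 1 } );                    // no-op: "age_1" cached (assuming the create succeeded)
    db.users.ensureIndex( { email : 1 } , "emailIdx" );     // string option overrides the index name
    db.users.ensureIndex( { ssn : 1 } , true );             // boolean option marks the index unique
    db.users.ensureIndex( { tag : 1 } , [ true , true ] );  // array form: unique , then dropDups
    db.users.ensureIndex( { a : 1 , b : 1 } , { unique : true } ); // a plain object is merged into the spec
    db.users.resetIndexCache();                             // next ensureIndex re-issues createIndex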
diff --git a/shell/query.js b/shell/query.js
index aabd12e..508fba2 100644
--- a/shell/query.js
+++ b/shell/query.js
@@ -1,7 +1,7 @@
// query.js
if ( typeof DBQuery == "undefined" ){
- DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip ){
+ DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize ){
this._mongo = mongo; // 0
this._db = db; // 1
@@ -12,7 +12,8 @@ if ( typeof DBQuery == "undefined" ){
this._fields = fields; // 5
this._limit = limit || 0; // 6
this._skip = skip || 0; // 7
-
+ this._batchSize = batchSize || 0;
+
this._cursor = null;
this._numReturned = 0;
this._special = false;
@@ -36,7 +37,7 @@ DBQuery.prototype.help = function(){
DBQuery.prototype.clone = function(){
var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,
this._query , this._fields ,
- this._limit , this._skip );
+ this._limit , this._skip , this._batchSize );
q._special = this._special;
return q;
}
@@ -58,7 +59,7 @@ DBQuery.prototype._checkModify = function(){
DBQuery.prototype._exec = function(){
if ( ! this._cursor ){
assert.eq( 0 , this._numReturned );
- this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip );
+ this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize );
this._cursorSeen = 0;
}
return this._cursor;
@@ -70,6 +71,13 @@ DBQuery.prototype.limit = function( limit ){
return this;
}
+DBQuery.prototype.batchSize = function( batchSize ){
+ this._checkModify();
+ this._batchSize = batchSize;
+ return this;
+}
+
+
DBQuery.prototype.skip = function( skip ){
this._checkModify();
this._skip = skip;
@@ -167,28 +175,26 @@ DBQuery.prototype.length = function(){
return this.toArray().length;
}
-DBQuery.prototype.sort = function( sortBy ){
+DBQuery.prototype._addSpecial = function( name , value ){
this._ensureSpecial();
- this._query.orderby = sortBy;
+ this._query[name] = value;
return this;
}
+DBQuery.prototype.sort = function( sortBy ){
+ return this._addSpecial( "orderby" , sortBy );
+}
+
DBQuery.prototype.hint = function( hint ){
- this._ensureSpecial();
- this._query["$hint"] = hint;
- return this;
+ return this._addSpecial( "$hint" , hint );
}
DBQuery.prototype.min = function( min ) {
- this._ensureSpecial();
- this._query["$min"] = min;
- return this;
+ return this._addSpecial( "$min" , min );
}
DBQuery.prototype.max = function( max ) {
- this._ensureSpecial();
- this._query["$max"] = max;
- return this;
+ return this._addSpecial( "$max" , max );
}
DBQuery.prototype.forEach = function( func ){
@@ -219,7 +225,7 @@ DBQuery.prototype.snapshot = function(){
this._ensureSpecial();
this._query.$snapshot = true;
return this;
- }
+}
DBQuery.prototype.shellPrint = function(){
try {
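
The sort/hint/min/max modifiers now share _addSpecial, and batchSize is forwarded to this._mongo.find; an illustrative chain ("foo" and the values are hypothetical):

    var c = db.foo.find( { x : { $gt : 5 } } )
                .sort( { x : 1 } )      // sets _query.orderby via _addSpecial
                .hint( { x : 1 } )      // sets _query["$hint"]
                .batchSize( 100 )       // docs per getmore batch, now passed through _exec
                .limit( 1000 );
    c.forEach( printjson );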
diff --git a/shell/servers.js b/shell/servers.js
index 109f871..f681263 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -32,6 +32,7 @@ createMongoArgs = function( binaryName , args ){
if ( k == "v" && isNumber( o[k] ) ){
var n = o[k];
if ( n > 0 ){
+ if ( n > 10 ) n = 10;
var temp = "-";
while ( n-- > 0 ) temp += "v";
fullArgs.push( temp );
@@ -52,6 +53,25 @@ createMongoArgs = function( binaryName , args ){
return fullArgs;
}
+startMongodTest = function( port , dirname , restart ){
+ var f = startMongod;
+ if ( restart )
+ f = startMongodNoReset;
+ var conn = f.apply( null , [
+ {
+ port : port ,
+ dbpath : "/data/db/" + dirname ,
+ noprealloc : "" ,
+ smallfiles : "" ,
+ oplogSize : "2" ,
+ nohttpinterface : ""
+ }
+ ]
+ );
+ conn.name = "localhost:" + port;
+ return conn;
+}
+
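A usage sketch of startMongodTest above (port and directory name are illustrative):

    var conn = startMongodTest( 30017 , "mytest" );        // clears /data/db/mytest, then starts mongod
    conn.getDB( "test" ).foo.insert( { a : 1 } );
    stopMongod( 30017 );
    conn = startMongodTest( 30017 , "mytest" , true );     // restart path: startMongodNoReset keeps the data
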
// Start a mongod instance and return a 'Mongo' object connected to it.
// This function's arguments are passed as command line arguments to mongod.
// The specified 'dbpath' is cleared if it exists, created if not.
@@ -65,6 +85,11 @@ startMongod = function(){
return startMongoProgram.apply( null, args );
}
+startMongodNoReset = function(){
+ var args = createMongoArgs( "mongod" , arguments );
+ return startMongoProgram.apply( null, args );
+}
+
startMongos = function(){
return startMongoProgram.apply( null, createMongoArgs( "mongos" , arguments ) );
}
@@ -87,7 +112,7 @@ startMongoProgram = function(){
} catch( e ) {
}
return false;
- }, "unable to connect to mongo program on port " + port, 30000 );
+ }, "unable to connect to mongo program on port " + port, 60000 );
return m;
}
@@ -107,24 +132,31 @@ myPort = function() {
return 27017;
}
-ShardingTest = function( testName , numServers , verboseLevel , numMongos ){
+ShardingTest = function( testName , numServers , verboseLevel , numMongos , otherParams ){
+ if ( ! otherParams )
+ otherParams = {}
this._connections = [];
- this._serverNames = [];
+
+ if ( otherParams.sync && numServers < 3 )
+ throw "if you want sync, you need at least 3 servers";
for ( var i=0; i<numServers; i++){
- var conn = startMongod( { port : 30000 + i , dbpath : "/data/db/" + testName + i ,
- noprealloc : "" , smallfiles : "" , oplogSize : "2" } );
- conn.name = "localhost:" + ( 30000 + i );
-
+ var conn = startMongodTest( 30000 + i , testName + i );
this._connections.push( conn );
- this._serverNames.push( conn.name );
}
- this._configDB = "localhost:30000";
-
+ if ( otherParams.sync ){
+ this._configDB = "localhost:30000,localhost:30001,localhost:30002";
+ this._configConnection = new Mongo( this._configDB );
+ this._configConnection.getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || 50 } );
+ }
+ else {
+ this._configDB = "localhost:30000";
+ this._connections[0].getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || 50 } );
+ }
this._mongos = [];
- var startMongosPort = 39999;
+ var startMongosPort = 31000;
for ( var i=0; i<(numMongos||1); i++ ){
var myPort = startMongosPort - i;
var conn = startMongos( { port : startMongosPort - i , v : verboseLevel || 0 , configdb : this._configDB } );
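
ShardingTest now accepts an otherParams object; a hedged example of the new sync and chunksize knobs (the test name is arbitrary):

    var st = new ShardingTest( "synctest" , 3 , 0 , 1 , { sync : true , chunksize : 50 } );
    // sync : true requires numServers >= 3 and sets the configdb string to
    // "localhost:30000,localhost:30001,localhost:30002"; chunksize is seeded
    // into config.settings as { _id : "chunksize" , value : 50 }
    st.stop();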
@@ -137,9 +169,9 @@ ShardingTest = function( testName , numServers , verboseLevel , numMongos ){
var admin = this.admin = this.s.getDB( "admin" );
this.config = this.s.getDB( "config" );
- this._serverNames.forEach(
+ this._connections.forEach(
function(z){
- admin.runCommand( { addshard : z , allowLocal : true } );
+ admin.runCommand( { addshard : z.name , allowLocal : true } );
}
);
}
@@ -154,9 +186,10 @@ ShardingTest.prototype.getServerName = function( dbname ){
ShardingTest.prototype.getServer = function( dbname ){
var name = this.getServerName( dbname );
- for ( var i=0; i<this._serverNames.length; i++ ){
- if ( name == this._serverNames[i] )
- return this._connections[i];
+ for ( var i=0; i<this._connections.length; i++ ){
+ var c = this._connections[i];
+ if ( name == c.name )
+ return c;
}
throw "can't find server for: " + dbname + " name:" + name;
@@ -171,9 +204,17 @@ ShardingTest.prototype.getOther = function( one ){
return this._connections[0];
}
+ShardingTest.prototype.getFirstOther = function( one ){
+ for ( var i=0; i<this._connections.length; i++ ){
+ if ( this._connections[i] != one )
+ return this._connections[i];
+ }
+ throw "impossible";
+}
+
ShardingTest.prototype.stop = function(){
for ( var i=0; i<this._mongos.length; i++ ){
- stopMongoProgram( 39999 - i );
+ stopMongoProgram( 31000 - i );
}
for ( var i=0; i<this._connections.length; i++){
stopMongod( 30000 + i );
@@ -322,6 +363,7 @@ MongodRunner.prototype.start = function( reuseData ) {
if ( this.extraArgs_ ) {
args = args.concat( this.extraArgs_ );
}
+ removeFile( this.dbpath_ + "/mongod.lock" );
if ( reuseData ) {
return startMongoProgram.apply( null, args );
} else {
@@ -551,7 +593,7 @@ ReplTest.prototype.getPath = function( master ){
}
-ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst ){
+ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norepl ){
if ( ! extra )
extra = {};
@@ -571,13 +613,15 @@ ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst ){
a.push( this.getPath( master ) );
- if ( master ){
- a.push( "--master" );
- }
- else {
- a.push( "--slave" );
- a.push( "--source" );
- a.push( "127.0.0.1:" + this.ports[0] );
+ if ( !norepl ) {
+ if ( master ){
+ a.push( "--master" );
+ }
+ else {
+ a.push( "--slave" );
+ a.push( "--source" );
+ a.push( "127.0.0.1:" + this.ports[0] );
+ }
}
for ( var k in extra ){
@@ -590,8 +634,10 @@ ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst ){
return a;
}
-ReplTest.prototype.start = function( master , options , restart ){
- var o = this.getOptions( master , options , restart );
+ReplTest.prototype.start = function( master , options , restart, norepl ){
+ var lockFile = this.getPath( master ) + "/mongod.lock";
+ removeFile( lockFile );
+ var o = this.getOptions( master , options , restart, norepl );
if ( restart )
return startMongoProgram.apply( null , o );
else
@@ -604,5 +650,53 @@ ReplTest.prototype.stop = function( master , signal ){
this.stop( false );
return;
}
- stopMongod( this.getPort( master ) , signal || 15 );
+ return stopMongod( this.getPort( master ) , signal || 15 );
+}
+
+allocatePorts = function( n ) {
+ var ret = [];
+ for( var i = 31000; i < 31000 + n; ++i )
+ ret.push( i );
+ return ret;
+}
+
+
+SyncCCTest = function( testName ){
+ this._testName = testName;
+ this._connections = [];
+
+ for ( var i=0; i<3; i++ ){
+ this._connections.push( startMongodTest( 30000 + i , testName + i ) );
+ }
+
+ this.url = this._connections.map( function(z){ return z.name; } ).join( "," );
+ this.conn = new Mongo( this.url );
+}
+
+SyncCCTest.prototype.stop = function(){
+ for ( var i=0; i<this._connections.length; i++){
+ stopMongod( 30000 + i );
+ }
+}
+
+SyncCCTest.prototype.checkHashes = function( dbname , msg ){
+ var hashes = this._connections.map(
+ function(z){
+ return z.getDB( dbname ).runCommand( "dbhash" );
+ }
+ );
+
+ for ( var i=1; i<hashes.length; i++ ){
+ assert.eq( hashes[0].md5 , hashes[i].md5 , "checkHash on " + dbname + " " + msg + "\n" + tojson( hashes ) )
+ }
+}
+
+SyncCCTest.prototype.tempKill = function( num ){
+ num = num || 0;
+ stopMongod( 30000 + num );
+}
+
+SyncCCTest.prototype.tempStart = function( num ){
+ num = num || 0;
+ this._connections[num] = startMongodTest( 30000 + num , this._testName + num , true );
}
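
A hypothetical walk-through of the SyncCCTest harness defined above:

    var t = new SyncCCTest( "sync1" );       // three mongods on ports 30000-30002
    t.conn.getDB( "test" ).foo.save( { _id : 1 } );
    t.checkHashes( "test" , "after save" );  // dbhash must agree across all three members
    t.tempKill( 1 );                         // take member 1 down ...
    t.tempStart( 1 );                        // ... and bring it back with its data intact
    t.stop();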
diff --git a/shell/utils.cpp b/shell/utils.cpp
index c4735ee..b10c93d 100644
--- a/shell/utils.cpp
+++ b/shell/utils.cpp
@@ -1,4 +1,22 @@
// utils.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "../stdafx.h"
#include <boost/thread/xtime.hpp>
@@ -10,20 +28,32 @@
#include <map>
#include <sstream>
#include <vector>
+#include <fcntl.h>
-#ifndef _WIN32
-#include <sys/socket.h>
-#include <netinet/in.h>
+#ifdef _WIN32
+# include <io.h>
+# define SIGKILL 9
+#else
+# include <sys/socket.h>
+# include <netinet/in.h>
+# include <signal.h>
+# include <sys/stat.h>
+# include <sys/wait.h>
#endif
#include "../client/dbclient.h"
#include "../util/processinfo.h"
-#include "../util/md5.hpp"
#include "utils.h"
extern const char * jsconcatcode_server;
namespace mongo {
+#ifdef _WIN32
+ inline int close(int fd) { return _close(fd); }
+ inline int read(int fd, void* buf, size_t size) { return _read(fd, buf, size); }
+
+ inline int pipe(int fds[2]) { return _pipe(fds, 1024, _O_TEXT | _O_NOINHERIT); }
+#endif
namespace shellUtils {
@@ -73,6 +103,32 @@ namespace mongo {
return undefined_;
}
+
+ BSONObj Quit(const BSONObj& args) {
+ // If no arguments are given, the first element will be EOO, which
+ // converts to the integer value 0.
+ int exit_code = int( args.firstElement().number() );
+ ::exit(exit_code);
+ return undefined_;
+ }
+
+ BSONObj JSGetMemInfo( const BSONObj& args ){
+ ProcessInfo pi;
+ uassert( 10258 , "processinfo not supported" , pi.supported() );
+
+ BSONObjBuilder e;
+ e.append( "virtual" , pi.getVirtualMemorySize() );
+ e.append( "resident" , pi.getResidentSize() );
+
+ BSONObjBuilder b;
+ b.append( "ret" , e.obj() );
+
+ return b.obj();
+ }
+
+
+#ifndef MONGO_SAFE_SHELL
+
BSONObj listFiles(const BSONObj& args){
uassert( 10257 , "need to specify 1 argument to listFiles" , args.nFields() == 1 );
@@ -80,7 +136,10 @@ namespace mongo {
string rootname = args.firstElement().valuestrsafe();
path root( rootname );
- uassert( 12581, "listFiles: no such directory", boost::filesystem::exists( root ) );
+ stringstream ss;
+ ss << "listFiles: no such directory: " << rootname;
+ string msg = ss.str();
+ uassert( 12581, msg.c_str(), boost::filesystem::exists( root ) );
directory_iterator end;
directory_iterator i( root);
@@ -113,92 +172,34 @@ namespace mongo {
ret.appendArray( "", lst.done() );
return ret.obj();
}
-
- BSONObj Quit(const BSONObj& args) {
- // If not arguments are given first element will be EOO, which
- // converts to the integer value 0.
- int exit_code = int( args.firstElement().number() );
- ::exit(exit_code);
- return undefined_;
- }
- BSONObj JSGetMemInfo( const BSONObj& args ){
- ProcessInfo pi;
- uassert( 10258 , "processinfo not supported" , pi.supported() );
-
- BSONObjBuilder e;
- e.append( "virtual" , pi.getVirtualMemorySize() );
- e.append( "resident" , pi.getResidentSize() );
-
- BSONObjBuilder b;
- b.append( "ret" , e.obj() );
-
- return b.obj();
- }
- BSONObj JSVersion( const BSONObj& args ){
- cout << "version: " << versionString << endl;
- if ( strstr( versionString , "+" ) )
- printGitVersion();
- return BSONObj();
- }
-
-#ifndef _WIN32
-#include <signal.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/wait.h>
-
- BSONObj AllocatePorts( const BSONObj &args ) {
- uassert( 10259 , "allocatePorts takes exactly 1 argument", args.nFields() == 1 );
- uassert( 10260 , "allocatePorts needs to be passed an integer", args.firstElement().isNumber() );
+ BSONObj removeFile(const BSONObj& args){
+ uassert( 12597 , "need to specify 1 argument to removeFile" , args.nFields() == 1 );
- int n = int( args.firstElement().number() );
+ bool found = false;
- vector< int > ports;
- vector< int > sockets;
- for( int i = 0; i < n; ++i ) {
- int s = socket( AF_INET, SOCK_STREAM, 0 );
- assert( s );
-
- sockaddr_in address;
- memset(address.sin_zero, 0, sizeof(address.sin_zero));
- address.sin_family = AF_INET;
- address.sin_port = 0;
- address.sin_addr.s_addr = inet_addr( "127.0.0.1" );
- assert( 0 == ::bind( s, (sockaddr*)&address, sizeof( address ) ) );
-
- sockaddr_in newAddress;
- socklen_t len = sizeof( newAddress );
- assert( 0 == getsockname( s, (sockaddr*)&newAddress, &len ) );
- ports.push_back( ntohs( newAddress.sin_port ) );
- sockets.push_back( s );
+ path root( args.firstElement().valuestrsafe() );
+ if ( boost::filesystem::exists( root ) ){
+ found = true;
+ boost::filesystem::remove_all( root );
}
- for( vector< int >::const_iterator i = sockets.begin(); i != sockets.end(); ++i )
- assert( 0 == close( *i ) );
-
- sort( ports.begin(), ports.end() );
- for( unsigned i = 1; i < ports.size(); ++i )
- massert( 10434 , "duplicate ports allocated", ports[ i - 1 ] != ports[ i ] );
+
BSONObjBuilder b;
- b.append( "", ports );
+ b.appendBool( "removed" , found );
return b.obj();
}
-
map< int, pair< pid_t, int > > dbs;
map< pid_t, int > shells;
+#ifdef _WIN32
+ map< pid_t, HANDLE > handles;
+#endif
- char *copyString( const char *original ) {
- char *ret = reinterpret_cast< char * >( malloc( strlen( original ) + 1 ) );
- strcpy( ret, original );
- return ret;
- }
-
- boost::mutex &mongoProgramOutputMutex( *( new boost::mutex ) );
+ mongo::mutex mongoProgramOutputMutex;
stringstream mongoProgramOutput_;
-
+
void writeMongoProgramOutputLine( int port, int pid, const char *line ) {
- boost::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
stringstream buf;
if ( port > 0 )
buf << "m" << port << "| " << line;
@@ -208,38 +209,52 @@ namespace mongo {
mongoProgramOutput_ << buf.str() << endl;
}
+ // only returns last 100000 characters
BSONObj RawMongoProgramOutput( const BSONObj &args ) {
- boost::mutex::scoped_lock lk( mongoProgramOutputMutex );
- return BSON( "" << mongoProgramOutput_.str() );
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ string out = mongoProgramOutput_.str();
+ size_t len = out.length();
+ if ( len > 100000 )
+ out = out.substr( len - 100000, 100000 );
+ return BSON( "" << out );
}
-
- class MongoProgramRunner {
- char **argv_;
+
+ BSONObj ClearRawMongoProgramOutput( const BSONObj &args ) {
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ mongoProgramOutput_.str( "" );
+ return undefined_;
+ }
+
+ class ProgramRunner {
+ vector<string> argv_;
int port_;
int pipe_;
pid_t pid_;
public:
pid_t pid() const { return pid_; }
- MongoProgramRunner( const BSONObj &args ) {
- assert( args.nFields() > 0 );
+ ProgramRunner( const BSONObj &args , bool isMongoProgram=true)
+ {
+ assert( !args.isEmpty() );
+
string program( args.firstElement().valuestrsafe() );
-
assert( !program.empty() );
- boost::filesystem::path programPath = ( boost::filesystem::path( argv0 ) ).branch_path() / program;
- massert( 10435 , "couldn't find " + programPath.native_file_string(), boost::filesystem::exists( programPath ) );
+ boost::filesystem::path programPath = program;
+
+ if (isMongoProgram){
+ programPath = boost::filesystem::initial_path() / programPath;
+#ifdef _WIN32
+ programPath = change_extension(programPath, ".exe");
+#endif
+ massert( 10435 , "couldn't find " + programPath.native_file_string(), boost::filesystem::exists( programPath ) );
+ }
+
+ argv_.push_back( programPath.native_file_string() );
port_ = -1;
- argv_ = new char *[ args.nFields() + 1 ];
- {
- string s = programPath.native_file_string();
- if ( s == program )
- s = "./" + s;
- argv_[ 0 ] = copyString( s.c_str() );
- }
BSONObjIterator j( args );
- j.next();
- for( int i = 1; i < args.nFields(); ++i ) {
+ j.next(); // skip program name (handled above)
+ while(j.more()) {
BSONElement e = j.next();
string str;
if ( e.isNumber() ) {
@@ -250,14 +265,12 @@ namespace mongo {
assert( e.type() == mongo::String );
str = e.valuestr();
}
- char *s = copyString( str.c_str() );
- if ( string( "--port" ) == s )
+ if ( str == "--port" )
port_ = -2;
else if ( port_ == -2 )
- port_ = strtol( s, 0, 10 );
- argv_[ i ] = s;
+ port_ = strtol( str.c_str(), 0, 10 );
+ argv_.push_back(str);
}
- argv_[ args.nFields() ] = 0;
if ( program != "mongod" && program != "mongos" && program != "mongobridge" )
port_ = 0;
@@ -274,27 +287,12 @@ namespace mongo {
assert( pipe( pipeEnds ) != -1 );
fflush( 0 );
- pid_ = fork();
- assert( pid_ != -1 );
-
- if ( pid_ == 0 ) {
-
- assert( dup2( pipeEnds[ 1 ], STDOUT_FILENO ) != -1 );
- assert( dup2( pipeEnds[ 1 ], STDERR_FILENO ) != -1 );
- execvp( argv_[ 0 ], argv_ );
- massert( 10436 , "Unable to start program" , 0 );
- }
+ launch_process(pipeEnds[1]); //sets pid_
cout << "shell: started mongo program";
- int i = 0;
- while( argv_[ i ] )
- cout << " " << argv_[ i++ ];
+ for (unsigned i=0; i < argv_.size(); i++)
+ cout << " " << argv_[i];
cout << endl;
-
- i = 0;
- while( argv_[ i ] )
- free( argv_[ i++ ] );
- free( argv_ );
if ( port_ > 0 )
dbs.insert( make_pair( port_, make_pair( pid_, pipeEnds[ 1 ] ) ) );
@@ -332,30 +330,136 @@ namespace mongo {
strcpy( temp, last );
strcpy( buf, temp );
} else {
- assert( strlen( buf ) < 1023 );
+ assert( strlen( buf ) <= 1023 );
}
start = buf + strlen( buf );
}
}
+ void launch_process(int child_stdout){
+#ifdef _WIN32
+ stringstream ss;
+ for (int i=0; i < argv_.size(); i++){
+ if (i) ss << ' ';
+ if (argv_[i].find(' ') == string::npos)
+ ss << argv_[i];
+ else
+ ss << '"' << argv_[i] << '"';
+ }
+
+ string args = ss.str();
+
+ boost::scoped_array<TCHAR> args_tchar (new TCHAR[args.size() + 1]);
+ for (size_t i=0; i < args.size()+1; i++)
+ args_tchar[i] = args[i];
+
+ HANDLE h = (HANDLE)_get_osfhandle(child_stdout);
+ assert(h != INVALID_HANDLE_VALUE);
+ assert(SetHandleInformation(h, HANDLE_FLAG_INHERIT, 1));
+
+ STARTUPINFO si;
+ ZeroMemory(&si, sizeof(si));
+ si.cb = sizeof(si);
+ si.hStdError = h;
+ si.hStdOutput = h;
+ si.dwFlags |= STARTF_USESTDHANDLES;
+
+ PROCESS_INFORMATION pi;
+ ZeroMemory(&pi, sizeof(pi));
+
+ bool success = CreateProcess( NULL, args_tchar.get(), NULL, NULL, true, 0, NULL, NULL, &si, &pi);
+ assert(success);
+
+ CloseHandle(pi.hThread);
+
+ pid_ = pi.dwProcessId;
+ handles.insert( make_pair( pid_, pi.hProcess ) );
+
+#else
+
+ pid_ = fork();
+ assert( pid_ != -1 );
+
+ if ( pid_ == 0 ) {
+ // DON'T ASSERT IN THIS BLOCK - very bad things will happen
+
+ const char** argv = new const char* [argv_.size()+1]; // don't need to free - in child
+ for (unsigned i=0; i < argv_.size(); i++){
+ argv[i] = argv_[i].c_str();
+ }
+ argv[argv_.size()] = 0;
+
+ if ( dup2( child_stdout, STDOUT_FILENO ) == -1 ||
+ dup2( child_stdout, STDERR_FILENO ) == -1 )
+ {
+ cout << "Unable to dup2 child output: " << OUTPUT_ERRNO << endl;
+ ::_Exit(-1); //do not pass go, do not call atexit handlers
+ }
+
+ execvp( argv[ 0 ], const_cast<char**>(argv) );
+
+ cout << "Unable to start program: " << OUTPUT_ERRNO << endl;
+ ::_Exit(-1);
+ }
+
+#endif
+ }
};
+ //returns true if process exited
+ bool wait_for_pid(pid_t pid, bool block=true, int* exit_code=NULL){
+#ifdef _WIN32
+ assert(handles.count(pid));
+ HANDLE h = handles[pid];
+
+ if (block)
+ WaitForSingleObject(h, INFINITE);
+
+ DWORD tmp;
+ if(GetExitCodeProcess(h, &tmp)){
+ CloseHandle(h);
+ handles.erase(pid);
+ if (exit_code)
+ *exit_code = tmp;
+ return true;
+ }else{
+ return false;
+ }
+#else
+ int tmp;
+ bool ret = (pid == waitpid(pid, &tmp, (block ? 0 : WNOHANG)));
+ if (exit_code)
+ *exit_code = WEXITSTATUS(tmp);
+ return ret;
+
+#endif
+ }
BSONObj StartMongoProgram( const BSONObj &a ) {
- MongoProgramRunner r( a );
+ _nokillop = true;
+ ProgramRunner r( a );
r.start();
boost::thread t( r );
return BSON( string( "" ) << int( r.pid() ) );
}
BSONObj RunMongoProgram( const BSONObj &a ) {
- MongoProgramRunner r( a );
+ ProgramRunner r( a );
r.start();
boost::thread t( r );
- int temp;
- waitpid( r.pid() , &temp , 0 );
+ wait_for_pid(r.pid());
shells.erase( r.pid() );
return BSON( string( "" ) << int( r.pid() ) );
}
+ BSONObj RunProgram(const BSONObj &a) {
+ ProgramRunner r( a, false );
+ r.start();
+ boost::thread t( r );
+ int exit_code;
+ wait_for_pid(r.pid(), true, &exit_code);
+ shells.erase( r.pid() );
+ return BSON( string( "" ) << exit_code );
+ }
+
BSONObj ResetDbpath( const BSONObj &a ) {
assert( a.nFields() == 1 );
string path = a.firstElement().valuestrsafe();
@@ -366,19 +470,73 @@ namespace mongo {
return undefined_;
}
- void killDb( int port, pid_t _pid, int signal ) {
+ void copyDir( const path &from, const path &to ) {
+ directory_iterator end;
+ directory_iterator i( from );
+ while( i != end ) {
+ path p = *i;
+ if ( p.leaf() != "mongod.lock" ) {
+ if ( is_directory( p ) ) {
+ path newDir = to / p.leaf();
+ boost::filesystem::create_directory( newDir );
+ copyDir( p, newDir );
+ } else {
+ boost::filesystem::copy_file( p, to / p.leaf() );
+ }
+ }
+ ++i;
+ }
+ }
+
+ // NOTE target dbpath will be cleared first
+ BSONObj CopyDbpath( const BSONObj &a ) {
+ assert( a.nFields() == 2 );
+ BSONObjIterator i( a );
+ string from = i.next().str();
+ string to = i.next().str();
+ assert( !from.empty() );
+ assert( !to.empty() );
+ if ( boost::filesystem::exists( to ) )
+ boost::filesystem::remove_all( to );
+ boost::filesystem::create_directory( to );
+ copyDir( from, to );
+ return undefined_;
+ }
+
+ inline void kill_wrapper(pid_t pid, int sig, int port){
+#ifdef _WIN32
+ if (sig == SIGKILL || port == 0){
+ assert( handles.count(pid) );
+ TerminateProcess(handles[pid], 1); // returns failure for "zombie" processes.
+ }else{
+ DBClientConnection conn;
+ conn.connect("127.0.0.1:" + BSONObjBuilder::numStr(port));
+ try {
+ conn.simpleCommand("admin", NULL, "shutdown");
+ } catch (...) {
+ //Do nothing. This command never returns data to the client and the driver doesn't like that.
+ }
+ }
+#else
+ assert( 0 == kill( pid, sig ) );
+#endif
+ }
+
+
+ int killDb( int port, pid_t _pid, int signal ) {
pid_t pid;
+ int exitCode = 0;
if ( port > 0 ) {
if( dbs.count( port ) != 1 ) {
cout << "No db started on port: " << port << endl;
- return;
+ return 0;
}
pid = dbs[ port ].first;
} else {
pid = _pid;
}
- assert( 0 == kill( pid, signal ) );
+ kill_wrapper( pid, signal, port );
int i = 0;
for( ; i < 65; ++i ) {
@@ -387,11 +545,9 @@ namespace mongo {
time_t_to_String(time(0), now);
now[ 20 ] = 0;
cout << now << " process on port " << port << ", with pid " << pid << " not terminated, sending sigkill" << endl;
- assert( 0 == kill( pid, SIGKILL ) );
+ kill_wrapper( pid, SIGKILL, port );
}
- int temp;
- int ret = waitpid( pid, &temp, WNOHANG );
- if ( ret == pid )
+ if(wait_for_pid(pid, false, &exitCode))
break;
sleepms( 1000 );
}
@@ -402,7 +558,7 @@ namespace mongo {
cout << now << " failed to terminate process on port " << port << ", with pid " << pid << endl;
assert( "Failed to terminate process" == 0 );
}
-
+
if ( port > 0 ) {
close( dbs[ port ].second );
dbs.erase( port );
@@ -413,6 +569,8 @@ namespace mongo {
if ( i > 4 || signal == SIGKILL ) {
sleepms( 4000 ); // allow operating system to reclaim resources
}
+
+ return exitCode;
}
int getSignal( const BSONObj &a ) {
@@ -431,18 +589,18 @@ namespace mongo {
assert( a.nFields() == 1 || a.nFields() == 2 );
assert( a.firstElement().isNumber() );
int port = int( a.firstElement().number() );
- killDb( port, 0, getSignal( a ) );
+ int code = killDb( port, 0, getSignal( a ) );
cout << "shell: stopped mongo program on port " << port << endl;
- return undefined_;
+ return BSON( "" << code );
}
BSONObj StopMongoProgramByPid( const BSONObj &a ) {
assert( a.nFields() == 1 || a.nFields() == 2 );
assert( a.firstElement().isNumber() );
int pid = int( a.firstElement().number() );
- killDb( 0, pid, getSignal( a ) );
+ int code = killDb( 0, pid, getSignal( a ) );
cout << "shell: stopped mongo program on pid " << pid << endl;
- return undefined_;
+ return BSON( "" << code );
}
void KillMongoProgramInstances() {
@@ -457,39 +615,23 @@ namespace mongo {
for( vector< pid_t >::iterator i = pids.begin(); i != pids.end(); ++i )
killDb( 0, *i, SIGTERM );
}
+#else // ndef MONGO_SAFE_SHELL
+ void KillMongoProgramInstances() {}
+#endif
MongoProgramScope::~MongoProgramScope() {
- try {
+ DESTRUCTOR_GUARD(
KillMongoProgramInstances();
- } catch ( ... ) {
- assert( false );
- }
+ ClearRawMongoProgramOutput( BSONObj() );
+ )
}
-#else
- MongoProgramScope::~MongoProgramScope() {}
- void KillMongoProgramInstances() {}
-#endif
-
- BSONObj jsmd5( const BSONObj &a ){
- uassert( 10261 , "js md5 needs a string" , a.firstElement().type() == String );
- const char * s = a.firstElement().valuestrsafe();
-
- md5digest d;
- md5_state_t st;
- md5_init(&st);
- md5_append( &st , (const md5_byte_t*)s , strlen( s ) );
- md5_finish(&st, d);
-
- return BSON( "" << digestToString( d ) );
- }
-
unsigned _randomSeed;
BSONObj JSSrand( const BSONObj &a ) {
uassert( 12518, "srand requires a single numeric argument",
a.nFields() == 1 && a.firstElement().isNumber() );
- _randomSeed = a.firstElement().numberLong(); // grab least significant digits
+ _randomSeed = (unsigned)a.firstElement().numberLong(); // grab least significant digits
return undefined_;
}
@@ -503,25 +645,40 @@ namespace mongo {
#endif
return BSON( "" << double( r ) / ( double( RAND_MAX ) + 1 ) );
}
+
+ BSONObj isWindows(const BSONObj& a){
+ uassert( 13006, "isWindows accepts no arguments", a.nFields() == 0 );
+#ifdef _WIN32
+ return BSON( "" << true );
+#else
+ return BSON( "" << false );
+#endif
+ }
void installShellUtils( Scope& scope ){
- scope.injectNative( "listFiles" , listFiles );
scope.injectNative( "sleep" , JSSleep );
scope.injectNative( "quit", Quit );
scope.injectNative( "getMemInfo" , JSGetMemInfo );
- scope.injectNative( "version" , JSVersion );
- scope.injectNative( "hex_md5" , jsmd5 );
scope.injectNative( "_srand" , JSSrand );
scope.injectNative( "_rand" , JSRand );
-#if !defined(_WIN32)
- scope.injectNative( "allocatePorts", AllocatePorts );
+ scope.injectNative( "_isWindows" , isWindows );
+
+#ifndef MONGO_SAFE_SHELL
+ //can't launch programs
scope.injectNative( "_startMongoProgram", StartMongoProgram );
+ scope.injectNative( "runProgram", RunProgram );
scope.injectNative( "runMongoProgram", RunMongoProgram );
scope.injectNative( "stopMongod", StopMongoProgram );
scope.injectNative( "stopMongoProgram", StopMongoProgram );
scope.injectNative( "stopMongoProgramByPid", StopMongoProgramByPid );
- scope.injectNative( "resetDbpath", ResetDbpath );
scope.injectNative( "rawMongoProgramOutput", RawMongoProgramOutput );
+ scope.injectNative( "clearRawMongoProgramOutput", ClearRawMongoProgramOutput );
+
+ //can't access filesystem
+ scope.injectNative( "removeFile" , removeFile );
+ scope.injectNative( "listFiles" , listFiles );
+ scope.injectNative( "resetDbpath", ResetDbpath );
+ scope.injectNative( "copyDbpath", CopyDbpath );
#endif
}
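
The natives registered above are callable from shell scripts; hedged examples (paths and ports are illustrative):

    removeFile( "/data/db/stale" );                 // recursive delete; reports whether anything existed
    copyDbpath( "/data/db/src" , "/data/db/dst" );  // clears dst first; mongod.lock is skipped
    clearRawMongoProgramOutput();                   // resets the now-capped (100000 char) output buffer
    if ( ! _isWindows() ){
        var code = runProgram( "ls" , "/tmp" );     // run an arbitrary program; returns its exit code
    }
    var exitCode = stopMongod( 30000 );             // killDb now reports the process exit code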
@@ -533,9 +690,24 @@ namespace mongo {
if ( !_dbConnect.empty() ) {
uassert( 12513, "connect failed", scope.exec( _dbConnect , "(connect)" , false , true , false ) );
if ( !_dbAuth.empty() ) {
+ installGlobalUtils( scope );
uassert( 12514, "login failed", scope.exec( _dbAuth , "(auth)" , true , true , false ) );
}
}
}
+
+ map< const void*, string > _allMyUris;
+ bool _nokillop = false;
+ void onConnect( DBClientWithCommands &c ) {
+ if ( _nokillop ) {
+ return;
+ }
+ BSONObj info;
+ if ( c.runCommand( "admin", BSON( "whatsmyuri" << 1 ), info ) ) {
+ // There's no way to explicitly disconnect a DBClientConnection, but we might allocate
+ // a new uri on automatic reconnect. So just store one uri per connection.
+ _allMyUris[ &c ] = info[ "you" ].str();
+ }
+ }
}
}
diff --git a/shell/utils.h b/shell/utils.h
index 7c98e2c..a2d420d 100644
--- a/shell/utils.h
+++ b/shell/utils.h
@@ -1,19 +1,37 @@
// utils.h
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#pragma once
#include "../scripting/engine.h"
namespace mongo {
-
+
namespace shellUtils {
extern std::string _dbConnect;
extern std::string _dbAuth;
+ extern map< const void*, string > _allMyUris;
+ extern bool _nokillop;
void RecordMyLocation( const char *_argv0 );
void installShellUtils( Scope& scope );
-
+
// Scoped management of mongo program instances. Simple implementation:
// destructor kills all mongod instances created by the shell.
struct MongoProgramScope {
@@ -23,5 +41,6 @@ namespace mongo {
void KillMongoProgramInstances();
void initScope( Scope &scope );
+ void onConnect( DBClientWithCommands &c );
}
}
diff --git a/shell/utils.js b/shell/utils.js
index 2d59b80..027ba0d 100644
--- a/shell/utils.js
+++ b/shell/utils.js
@@ -132,6 +132,17 @@ assert.gt = function( a , b , msg ){
doassert( a + " is not greater than " + b + " : " + msg );
}
+assert.close = function( a , b , msg , places ){
+ if (places === undefined) {
+ places = 4;
+ }
+ if (Math.round((a - b) * Math.pow(10, places)) === 0) {
+ return;
+ }
+ doassert( a + " is not equal to " + b + " within " + places +
+ " places, diff: " + (a-b) + " : " + msg );
+};
+
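assert.close compares to a number of decimal places (4 by default), which sidesteps floating-point error; for example:

    assert.close( 0.1 + 0.2 , 0.3 , "fp rounding" );    // passes: equal to 4 places
    assert.close( Math.PI , 3.14159 , "pi" , 4 );       // passes
    assert.close( 1.001 , 1.002 , "apart" , 4 );        // throws: off by 10 at 4 places
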
Object.extend = function( dst , src , deep ){
for ( var k in src ){
var v = src[k];
@@ -477,7 +488,8 @@ if ( typeof _threadInject != "undefined" ){
"jstests/extent.js",
"jstests/indexb.js",
"jstests/profile1.js",
- "jstests/mr3.js"] );
+ "jstests/mr3.js",
+ "jstests/apitest_db.js"] );
// some tests can't be run in parallel with each other
var serialTestsArr = [ "jstests/fsync.js",
@@ -576,10 +588,10 @@ if ( typeof _threadInject != "undefined" ){
}
tojson = function( x, indent , nolint ){
- if ( x == null )
+ if ( x === null )
return "null";
- if ( x == undefined )
+ if ( x === undefined )
return "undefined";
if (!indent)
@@ -905,3 +917,39 @@ Random.setRandomSeed = function( s ) {
Random.genExp = function( mean ) {
return -Math.log( Random.rand() ) * mean;
}
+
+killWithUris = function( uris ) {
+ var inprog = db.currentOp().inprog;
+ for( var u in uris ) {
+ for ( var i in inprog ) {
+ if ( uris[ u ] == inprog[ i ].client ) {
+ db.killOp( inprog[ i ].opid );
+ }
+ }
+ }
+}
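killWithUris pairs naturally with the _allMyUris bookkeeping added in shell/utils.cpp above; a hypothetical call (the uri value is made up):

    var uris = [ "192.168.0.5:49152" ];   // e.g. addresses recorded via the whatsmyuri command
    killWithUris( uris );                 // kills every in-progress op whose client matches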
+
+Geo = {};
+Geo.distance = function( a , b ){
+ var ax = null;
+ var ay = null;
+ var bx = null;
+ var by = null;
+
+ for ( var key in a ){
+ if ( ax == null )
+ ax = a[key];
+ else if ( ay == null )
+ ay = a[key];
+ }
+
+ for ( var key in b ){
+ if ( bx == null )
+ bx = b[key];
+ else if ( by == null )
+ by = b[key];
+ }
+
+ return Math.sqrt( Math.pow( by - ay , 2 ) +
+ Math.pow( bx - ax , 2 ) );
+}
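
Geo.distance reads the first two values of each point object, whatever the keys are called:

    Geo.distance( { x : 0 , y : 0 } , { x : 3 , y : 4 } );          // 5
    Geo.distance( { lon : 0 , lat : 0 } , { lon : 3 , lat : 4 } );  // also 5 - key names are ignored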
diff --git a/stdafx.cpp b/stdafx.cpp
index 4d21953..0a80de6 100644
--- a/stdafx.cpp
+++ b/stdafx.cpp
@@ -32,6 +32,6 @@
namespace mongo {
- const char versionString[] = "1.3.1";
+ const char versionString[] = "1.4.0";
} // namespace mongo
diff --git a/stdafx.h b/stdafx.h
index 5352c5e..fb7a630 100644
--- a/stdafx.h
+++ b/stdafx.h
@@ -53,6 +53,7 @@ namespace mongo {
EXIT_OOM_MALLOC = 42 ,
EXIT_OOM_REALLOC = 43 ,
EXIT_FS = 45 ,
+ EXIT_CLOCK_SKEW = 47 ,
EXIT_POSSIBLE_CORRUPTION = 60 , // this means we detected a possible corruption situation, like a buf overflow
EXIT_UNCAUGHT = 100 , // top level exception that wasn't caught
EXIT_TEST = 101 ,
diff --git a/tools/bridge.cpp b/tools/bridge.cpp
index 42c3287..5535719 100644
--- a/tools/bridge.cpp
+++ b/tools/bridge.cpp
@@ -106,6 +106,8 @@ void check( bool b ) {
}
int main( int argc, char **argv ) {
+ static StaticObserver staticObserver;
+
setupSignals();
check( argc == 5 );
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 4cbd2e1..52e95ce 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -28,7 +28,7 @@ namespace po = boost::program_options;
class Dump : public Tool {
public:
- Dump() : Tool( "dump" , "*" ){
+ Dump() : Tool( "dump" , true , "*" ){
add_options()
("out,o", po::value<string>()->default_value("dump"), "output directory")
;
@@ -39,7 +39,7 @@ public:
ofstream out;
out.open( outputFile.string().c_str() , ios_base::out | ios_base::binary );
- uassert( 10262 , "couldn't open file" , out.good() );
+ ASSERT_STREAM_GOOD( 10262 , "couldn't open file" , out );
ProgressMeter m( conn( true ).count( coll.c_str() , BSONObj() , QueryOption_SlaveOk ) );
diff --git a/tools/export.cpp b/tools/export.cpp
index ad27f27..aabebf3 100644
--- a/tools/export.cpp
+++ b/tools/export.cpp
@@ -54,7 +54,7 @@ public:
string dir = outfile.substr( 0 , idx + 1 );
create_directories( dir );
}
- ofstream * s = new ofstream( outfile.c_str() , ios_base::out | ios_base::binary );
+ ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
fileStream.reset( s );
outPtr = s;
if ( ! s->good() ){
@@ -120,7 +120,7 @@ public:
}
- cout << "exported " << num << " records" << endl;
+ cerr << "exported " << num << " records" << endl;
return 0;
}
diff --git a/tools/files.cpp b/tools/files.cpp
index e69a070..2cbda12 100644
--- a/tools/files.cpp
+++ b/tools/files.cpp
@@ -139,12 +139,14 @@ public:
}
+ conn().getLastError();
cout << "done!";
return 0;
}
if ( cmd == "delete" ){
g.removeFile(filename);
+ conn().getLastError();
cout << "done!";
return 0;
}
diff --git a/tools/import.cpp b/tools/import.cpp
index 47cbe32..e34e73d 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -76,28 +76,44 @@ class Import : public Tool {
}
pos++;
- int skip = 1;
+ bool done = false;
+ string data;
char * end;
if ( _type == CSV && line[0] == '"' ){
- line++;
- end = strstr( line , "\"" );
- skip = 2;
- }
- else {
+ line++; //skip first '"'
+
+ while (true) {
+ end = strchr( line , '"' );
+ if (!end){
+ data += line;
+ done = true;
+ break;
+ } else if (end[1] == '"') {
+ // two '"'s get appended as one
+ data.append(line, end-line+1); //include '"'
+ line = end+2; //skip both '"'s
+ } else if (end[-1] == '\\') {
+ // "\\\"" gets appended as '"'
+ data.append(line, end-line-1); //exclude '\\'
+ data.append("\"");
+ line = end+1; //skip the '"'
+ } else {
+ data.append(line, end-line);
+ line = end+2; //skip '"' and ','
+ break;
+ }
+ }
+ } else {
end = strstr( line , _sep );
+ if ( ! end ){
+ done = true;
+ data = string( line );
+ } else {
+ data = string( line , end - line );
+ line = end+1;
+ }
}
-
- bool done = false;
- string data;
- if ( ! end ){
- done = true;
- data = string( line );
- }
- else {
- data = string( line , end - line );
- }
-
if ( _headerLine ){
while ( isspace( data[0] ) )
data = data.substr( 1 );
@@ -108,7 +124,6 @@ class Import : public Tool {
if ( done )
break;
- line = end + skip;
}
return b.obj();
}
@@ -135,7 +150,7 @@ public:
istream * in = &cin;
- ifstream file( filename.c_str() , ios_base::in | ios_base::binary);
+ ifstream file( filename.c_str() , ios_base::in);
if ( filename.size() > 0 && filename != "-" ){
if ( ! exists( filename ) ){
@@ -201,7 +216,7 @@ public:
log(1) << "filesize: " << fileSize << endl;
ProgressMeter pm( fileSize );
const int BUF_SIZE = 1024 * 1024 * 4;
- boost::scoped_array<char> line(new char[BUF_SIZE]);
+ boost::scoped_array<char> line(new char[BUF_SIZE+2]);
while ( *in ){
char * buf = line.get();
in->getline( buf , BUF_SIZE );
@@ -214,6 +229,8 @@ public:
if ( ! len )
continue;
+ buf[len+1] = 0;
+
if ( in->rdstate() == ios_base::eofbit )
break;
assert( in->rdstate() == 0 );
@@ -238,6 +255,8 @@ public:
}
cout << "imported " << num << " objects" << endl;
+
+ conn().getLastError();
if ( errors == 0 )
return 0;
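The rewritten CSV branch above handles three cases inside a quoted field: a doubled quote ("") collapses to one literal quote, a backslash-escaped quote (\") collapses to one literal quote, and a lone quote ends the field. A minimal standalone sketch of the same unescaping rules (the unquote helper is illustrative, not part of the patch):

    #include <string>

    std::string unquote( const char * field ){
        std::string out;
        for ( const char * p = field; *p; p++ ){
            if ( p[0] == '"' && p[1] == '"' ){ out += '"'; p++; }       // "" -> "
            else if ( p[0] == '\\' && p[1] == '"' ){ out += '"'; p++; } // \" -> "
            else if ( p[0] == '"' ) break;                              // end of field
            else out += *p;
        }
        return out;
    }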
diff --git a/tools/restore.cpp b/tools/restore.cpp
index 19e3a26..6fcf2d3 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -31,7 +31,15 @@ namespace po = boost::program_options;
class Restore : public Tool {
public:
- Restore() : Tool( "restore" , "" , "" ){
+
+ bool _drop;
+ bool _objcheck;
+
+ Restore() : Tool( "restore" , true , "" , "" ) , _drop(false),_objcheck(false){
+ add_options()
+ ("drop" , "drop each collection before import" )
+ ("objcheck" , "validate object before inserting" )
+ ;
add_hidden_options()
("dir", po::value<string>()->default_value("dump"), "directory to restore from")
;
@@ -45,6 +53,8 @@ public:
int run(){
auth();
path root = getParam("dir");
+ _drop = hasParam( "drop" );
+ _objcheck = hasParam( "objcheck" );
/* If _db is not "" then the user specified a db name to restore as.
*
@@ -56,6 +66,7 @@ public:
* .bson file, or a single .bson file itself (a collection).
*/
drillDown(root, _db != "", _coll != "");
+ conn().getLastError();
return EXIT_CLEAN;
}
@@ -128,6 +139,11 @@ public:
out() << "\t going into namespace [" << ns << "]" << endl;
+ if ( _drop ){
+ out() << "\t dropping" << endl;
+ conn().dropCollection( ns );
+ }
+
string fileString = root.string();
ifstream file( fileString.c_str() , ios_base::in | ios_base::binary);
if ( ! file.is_open() ){
@@ -141,7 +157,8 @@ public:
long long num = 0;
const int BUF_SIZE = 1024 * 1024 * 5;
- char * buf = (char*)malloc( BUF_SIZE );
+ boost::scoped_array<char> buf_holder(new char[BUF_SIZE]);
+ char * buf = buf_holder.get();
ProgressMeter m( fileLength );
@@ -156,6 +173,22 @@ public:
file.read( buf + 4 , size - 4 );
BSONObj o( buf );
+ if ( _objcheck && ! o.valid() ){
+                    cerr << "INVALID OBJECT - going to try and print it out" << endl;
+ cerr << "size: " << size << endl;
+ BSONObjIterator i(o);
+ while ( i.more() ){
+ BSONElement e = i.next();
+ try {
+ e.validate();
+ }
+ catch ( ... ){
+ cerr << "\t\t NEXT ONE IS INVALID" << endl;
+ }
+ cerr << "\t name : " << e.fieldName() << " " << e.type() << endl;
+ cerr << "\t " << e << endl;
+ }
+ }
conn().insert( ns.c_str() , o );
read += o.objsize();
@@ -164,8 +197,6 @@ public:
m.hit( o.objsize() );
}
- free( buf );
-
uassert( 10265 , "counts don't match" , m.done() == fileLength );
out() << "\t " << m.hits() << " objects" << endl;
}
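The two new restore flags act per collection as each .bson file is drilled into. Typical invocations (paths illustrative, using the default "dump" directory):

    mongorestore --drop dump/        # drop each collection before reloading it
    mongorestore --objcheck dump/    # validate every object before inserting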
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 9590d8f..14d32bd 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -1,4 +1,20 @@
// sniffer.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
/*
TODO:
@@ -20,6 +36,7 @@
#include "../util/builder.h"
#include "../util/message.h"
+#include "../util/mmap.h"
#include "../db/dbmessage.h"
#include "../client/dbclient.h"
@@ -50,6 +67,7 @@ using mongo::BSONObj;
using mongo::BufBuilder;
using mongo::DBClientConnection;
using mongo::QueryResult;
+using mongo::MemoryMappedFile;
#define SNAP_LEN 65535
@@ -129,10 +147,12 @@ map< Connection, bool > seen;
map< Connection, int > bytesRemainingInMessage;
map< Connection, boost::shared_ptr< BufBuilder > > messageBuilder;
map< Connection, unsigned > expectedSeq;
-map< Connection, DBClientConnection* > forwarder;
+map< Connection, boost::shared_ptr<DBClientConnection> > forwarder;
map< Connection, long long > lastCursor;
map< Connection, map< long long, long long > > mapCursor;
+void processMessage( Connection& c , Message& d );
+
void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *packet){
const struct sniff_ip* ip = (struct sniff_ip*)(packet + captureHeaderSize);
@@ -224,6 +244,12 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
<< " " << m.data->len << " bytes "
<< " id:" << hex << m.data->id << dec << "\t" << m.data->id;
+ processMessage( c , m );
+}
+
+void processMessage( Connection& c , Message& m ){
+ DbMessage d(m);
+
if ( m.data->operation() == mongo::opReply )
cout << " - " << m.data->responseTo;
cout << endl;
@@ -281,11 +307,9 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
if ( !forwardAddress.empty() ) {
if ( m.data->operation() != mongo::opReply ) {
- DBClientConnection *conn = forwarder[ c ];
+ boost::shared_ptr<DBClientConnection> conn = forwarder[ c ];
if ( !conn ) {
- // These won't get freed on error, oh well hopefully we'll just
- // abort in that case anyway.
- conn = new DBClientConnection( true );
+ conn.reset(new DBClientConnection( true ));
conn->connect( forwardAddress );
forwarder[ c ] = conn;
}
@@ -328,13 +352,41 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
}
}
+void processDiagLog( const char * file ){
+ Connection c;
+ MemoryMappedFile f;
+ long length;
+
+ char * root = (char*)f.map( file , length , MemoryMappedFile::SEQUENTIAL );
+ assert( root );
+ assert( length > 0 );
+
+ char * pos = root;
+
+ long read = 0;
+ while ( read < length ){
+ Message m(pos,false);
+ int len = m.data->len;
+ DbMessage d(m);
+ cout << len << " " << d.getns() << endl;
+
+ processMessage( c , m );
+
+ read += len;
+ pos += len;
+ }
+
+ f.close();
+}
+
void usage() {
cout <<
- "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | FILE <filename>)] [<port0> <port1> ... ]\n"
+ "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | (FILE | DIAGLOG) <filename>)] [<port0> <port1> ... ]\n"
"--forward Forward all parsed request messages to mongod instance at \n"
" specified host:port\n"
"--source Source of traffic to sniff, either a network interface or a\n"
- " file containing perviously captured packets, in pcap format.\n"
+ " file containing previously captured packets in pcap format,\n"
+ " or a file containing output from mongod's --diaglog option.\n"
" If no source is specified, mongosniff will attempt to sniff\n"
" from one of the machine's network interfaces.\n"
"<port0>... These parameters are used to filter sniffing. By default, \n"
@@ -352,9 +404,10 @@ int main(int argc, char **argv){
struct bpf_program fp;
bpf_u_int32 mask;
bpf_u_int32 net;
-
+
bool source = false;
bool replay = false;
+ bool diaglog = false;
const char *file = 0;
vector< const char * > args;
@@ -367,18 +420,22 @@ int main(int argc, char **argv){
if ( arg == string( "--help" ) ) {
usage();
return 0;
- } else if ( arg == string( "--forward" ) ) {
+ }
+ else if ( arg == string( "--forward" ) ) {
forwardAddress = args[ ++i ];
- } else if ( arg == string( "--source" ) ) {
+ }
+ else if ( arg == string( "--source" ) ) {
uassert( 10266 , "can't use --source twice" , source == false );
uassert( 10267 , "source needs more args" , args.size() > i + 2);
source = true;
replay = ( args[ ++i ] == string( "FILE" ) );
- if ( replay )
+ diaglog = ( args[ i ] == string( "DIAGLOG" ) );
+ if ( replay || diaglog )
file = args[ ++i ];
else
dev = args[ ++i ];
- } else {
+ }
+ else {
serverPorts.insert( atoi( args[ i ] ) );
}
}
@@ -389,8 +446,19 @@ int main(int argc, char **argv){
if ( !serverPorts.size() )
serverPorts.insert( 27017 );
-
- if ( !replay ) {
+
+ if ( diaglog ){
+ processDiagLog( file );
+ return 0;
+ }
+ else if ( replay ){
+ handle = pcap_open_offline(file, errbuf);
+ if ( ! handle ){
+ cerr << "error opening capture file!" << endl;
+ return -1;
+ }
+ }
+ else {
if ( !dev ) {
dev = pcap_lookupdev(errbuf);
if ( ! dev ) {
@@ -408,13 +476,7 @@ int main(int argc, char **argv){
cerr << "error opening device: " << errbuf << endl;
return -1;
}
- } else {
- handle = pcap_open_offline(file, errbuf);
- if ( ! handle ){
- cerr << "error opening capture file!" << endl;
- return -1;
- }
- }
+ }
switch ( pcap_datalink( handle ) ){
case DLT_EN10MB:
@@ -440,9 +502,6 @@ int main(int argc, char **argv){
pcap_freecode(&fp);
pcap_close(handle);
- for( map< Connection, DBClientConnection* >::iterator i = forwarder.begin(); i != forwarder.end(); ++i )
- free( i->second );
-
return 0;
}
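With the new DIAGLOG source, a file written by mongod's --diaglog option can be decoded offline through the same processMessage() path as live traffic. A sketch of the workflow (the dbpath and diaglog file name are illustrative):

    mongod --dbpath /data/db --diaglog 3         # 3 = log both reads and writes
    mongosniff --source DIAGLOG /data/db/diaglog.4c7f9a5e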
diff --git a/tools/stat.cpp b/tools/stat.cpp
new file mode 100644
index 0000000..f66f3f1
--- /dev/null
+++ b/tools/stat.cpp
@@ -0,0 +1,194 @@
+// stat.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "stdafx.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+
+#include "tool.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ class Stat : public Tool {
+ public:
+
+ Stat() : Tool( "stat" , false , "admin" ){
+ _sleep = 1;
+ _rowNum = 0;
+ _showHeaders = true;
+
+ add_hidden_options()
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
+ add_options()
+ ("noheaders", "don't output column names")
+ ("rowcount,n", po::value<int>()->default_value(0), "number of stats lines to print (0 for indefinite)")
+ ;
+
+ addPositionArg( "sleep" , 1 );
+ }
+
+ virtual void printExtraHelp( ostream & out ){
+ out << "usage: " << _name << " [options] [sleep time]" << endl;
+ out << "sleep time: time to wait (in seconds) between calls" << endl;
+ }
+
+ BSONObj stats(){
+ BSONObj out;
+ if ( ! conn().simpleCommand( _db , &out , "serverStatus" ) ){
+ cout << "error: " << out << endl;
+ return BSONObj();
+ }
+ return out.getOwned();
+ }
+
+ double diff( const string& name , const BSONObj& a , const BSONObj& b ){
+ BSONElement x = a.getFieldDotted( name.c_str() );
+ BSONElement y = b.getFieldDotted( name.c_str() );
+ if ( ! x.isNumber() || ! y.isNumber() )
+ return -1;
+ return ( y.number() - x.number() ) / _sleep;
+ }
+
+ double percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b ){
+ double x = ( b.getFieldDotted( val ).number() - a.getFieldDotted( val ).number() );
+ double y = ( b.getFieldDotted( outof ).number() - a.getFieldDotted( outof ).number() );
+ if ( y == 0 )
+ return 0;
+ return x / y;
+ }
+
+ void cellstart( stringstream& ss , string name , unsigned& width ){
+ if ( ! _showHeaders ) {
+ return;
+ }
+ if ( name.size() > width )
+ width = name.size();
+ if ( _rowNum % 20 == 0 )
+ cout << setw(width) << name << " ";
+ }
+
+ void cell( stringstream& ss , string name , unsigned width , double val ){
+ cellstart( ss , name , width );
+ ss << setw(width) << setprecision(3) << val << " ";
+ }
+
+ void cell( stringstream& ss , string name , unsigned width , int val ){
+ cellstart( ss , name , width );
+ ss << setw(width) << val << " ";
+ }
+
+ void cell( stringstream& ss , string name , unsigned width , const string& val ){
+ assert( val.size() <= width );
+ cellstart( ss , name , width );
+ ss << setw(width) << val << " ";
+ }
+
+
+ string doRow( const BSONObj& a , const BSONObj& b ){
+ stringstream ss;
+
+ if ( b["opcounters"].type() == Object ){
+ BSONObj ax = a["opcounters"].embeddedObject();
+ BSONObj bx = b["opcounters"].embeddedObject();
+ BSONObjIterator i( bx );
+ while ( i.more() ){
+ BSONElement e = i.next();
+ cell( ss , (string)(e.fieldName()) + "/s" , 6 , (int)diff( e.fieldName() , ax , bx ) );
+ }
+ }
+
+ if ( b.getFieldDotted("mem.supported").trueValue() ){
+ BSONObj bx = b["mem"].embeddedObject();
+ BSONObjIterator i( bx );
+ cell( ss , "mapped" , 6 , bx["mapped"].numberInt() );
+ cell( ss , "vsize" , 6 , bx["virtual"].numberInt() );
+ cell( ss , "res" , 6 , bx["resident"].numberInt() );
+ }
+
+ cell( ss , "% locked" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
+ cell( ss , "% idx miss" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
+
+ cell( ss , "conn" , 5 , b.getFieldDotted( "connections.current" ).numberInt() );
+
+ {
+ struct tm t;
+ time_t_to_Struct( time(0), &t , true );
+ stringstream temp;
+ temp << setfill('0') << setw(2) << t.tm_hour
+ << ":"
+ << setfill('0') << setw(2) << t.tm_min
+ << ":"
+ << setfill('0') << setw(2) << t.tm_sec;
+ cell( ss , "time" , 8 , temp.str() );
+ }
+
+ if ( _showHeaders && _rowNum % 20 == 0 ){
+ // this is the newline after the header line
+ cout << endl;
+ }
+ _rowNum++;
+
+ return ss.str();
+ }
+
+ int run(){
+ _sleep = getParam( "sleep" , _sleep );
+ if ( hasParam( "noheaders" ) ) {
+ _showHeaders = false;
+ }
+ _rowCount = getParam( "rowcount" , 0 );
+
+ BSONObj prev = stats();
+ if ( prev.isEmpty() )
+ return -1;
+
+ while ( _rowCount == 0 || _rowNum < _rowCount ){
+ sleepsecs(_sleep);
+ BSONObj now = stats();
+ if ( now.isEmpty() )
+ return -2;
+
+ cout << doRow( prev , now ) << endl;
+
+ prev = now;
+ }
+ return 0;
+ }
+
+
+ int _sleep;
+ int _rowNum;
+ int _rowCount;
+ bool _showHeaders;
+ };
+
+}
+
+int main( int argc , char ** argv ) {
+ mongo::Stat stat;
+ return stat.main( argc , argv );
+}
+
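diff() and percent() above turn two consecutive serverStatus snapshots into per-second rates and ratios. A worked example with made-up numbers (not taken from the patch):

    // samples a and b collected _sleep = 2 seconds apart
    //   a: opcounters.insert = 1000        b: opcounters.insert = 1240
    // diff( "opcounters.insert" , a , b ) = ( 1240 - 1000 ) / 2 = 120 inserts/sec
    //
    //   a: globalLock.lockTime  = 10000    b: globalLock.lockTime  = 50000
    //   a: globalLock.totalTime = 0        b: globalLock.totalTime = 2000000
    // percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b )
    //   = ( 50000 - 10000 ) / ( 2000000 - 0 ) = 0.02, printed as "% locked"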
diff --git a/tools/tool.cpp b/tools/tool.cpp
index 8243a45..c92a0c4 100644
--- a/tools/tool.cpp
+++ b/tools/tool.cpp
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// Tool.cpp
#include "tool.h"
@@ -14,225 +30,259 @@ using namespace mongo;
namespace po = boost::program_options;
-mongo::Tool::Tool( string name , string defaultDB , string defaultCollection ) :
- _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) , _conn(0), _paired(false) {
-
- _options = new po::options_description( "options" );
- _options->add_options()
- ("help","produce help message")
- ("host,h",po::value<string>(), "mongo host to connect to" )
- ("db,d",po::value<string>(), "database to use" )
- ("collection,c",po::value<string>(), "collection to use (some commands)" )
- ("username,u",po::value<string>(), "username" )
- ("password,p",po::value<string>(), "password" )
- ("dbpath",po::value<string>(), "directly access mongod data files in this path, instead of connecting to a mongod instance" )
- ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
- ;
-
- _hidden_options = new po::options_description( name + " hidden options" );
-
- /* support for -vv -vvvv etc. */
- for (string s = "vv"; s.length() <= 10; s.append("v")) {
- _hidden_options->add_options()(s.c_str(), "verbose");
- }
-}
-
-mongo::Tool::~Tool(){
- delete( _options );
- delete( _hidden_options );
- if ( _conn )
- delete _conn;
-}
-
-void mongo::Tool::printExtraHelp( ostream & out ){
-}
+namespace mongo {
-void mongo::Tool::printHelp(ostream &out) {
- printExtraHelp(out);
- _options->print(out);
-}
+ Tool::Tool( string name , bool localDBAllowed , string defaultDB , string defaultCollection ) :
+ _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) , _conn(0), _paired(false) {
+
+ _options = new po::options_description( "options" );
+ _options->add_options()
+ ("help","produce help message")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("host,h",po::value<string>(), "mongo host to connect to (\"left,right\" for pairs)" )
+ ("db,d",po::value<string>(), "database to use" )
+ ("collection,c",po::value<string>(), "collection to use (some commands)" )
+ ("username,u",po::value<string>(), "username" )
+ ("password,p",po::value<string>(), "password" )
+ ;
+ if ( localDBAllowed )
+ _options->add_options()
+ ("dbpath",po::value<string>(), "directly access mongod data "
+ "files in the given path, instead of connecting to a mongod "
+ "instance - needs to lock the data directory, so cannot be "
+ "used if a mongod is currently accessing the same path" )
+ ("directoryperdb", "if dbpath specified, each db is in a separate directory" )
+ ;
+
+ _hidden_options = new po::options_description( name + " hidden options" );
+
+ /* support for -vv -vvvv etc. */
+ for (string s = "vv"; s.length() <= 10; s.append("v")) {
+ _hidden_options->add_options()(s.c_str(), "verbose");
+ }
+ }
-int mongo::Tool::main( int argc , char ** argv ){
- boost::filesystem::path::default_name_check( boost::filesystem::no_check );
-
- _name = argv[0];
-
- /* using the same style as db.cpp */
- int command_line_style = (((po::command_line_style::unix_style ^
- po::command_line_style::allow_guessing) |
- po::command_line_style::allow_long_disguise) ^
- po::command_line_style::allow_sticky);
- try {
- po::options_description all_options("all options");
- all_options.add(*_options).add(*_hidden_options);
-
- po::store( po::command_line_parser( argc , argv ).
- options(all_options).
- positional( _positonalOptions ).
- style(command_line_style).run() , _params );
-
- po::notify( _params );
- } catch (po::error &e) {
- cout << "ERROR: " << e.what() << endl << endl;
- printHelp(cout);
- return EXIT_BADOPTIONS;
+ Tool::~Tool(){
+ delete( _options );
+ delete( _hidden_options );
+ if ( _conn )
+ delete _conn;
}
- if ( _params.count( "help" ) ){
- printHelp(cerr);
- return 0;
+ void Tool::printExtraHelp( ostream & out ){
}
- if ( _params.count( "verbose" ) ) {
- logLevel = 1;
+ void Tool::printHelp(ostream &out) {
+ printExtraHelp(out);
+ _options->print(out);
}
- for (string s = "vv"; s.length() <= 10; s.append("v")) {
- if (_params.count(s)) {
- logLevel = s.length();
+ int Tool::main( int argc , char ** argv ){
+ static StaticObserver staticObserver;
+
+ cmdLine.prealloc = false;
+
+ boost::filesystem::path::default_name_check( boost::filesystem::no_check );
+
+ _name = argv[0];
+
+ /* using the same style as db.cpp */
+ int command_line_style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+ try {
+ po::options_description all_options("all options");
+ all_options.add(*_options).add(*_hidden_options);
+
+ po::store( po::command_line_parser( argc , argv ).
+ options(all_options).
+ positional( _positonalOptions ).
+ style(command_line_style).run() , _params );
+
+ po::notify( _params );
+ } catch (po::error &e) {
+ cerr << "ERROR: " << e.what() << endl << endl;
+ printHelp(cerr);
+ return EXIT_BADOPTIONS;
}
- }
- if ( ! hasParam( "dbpath" ) ) {
- _host = "127.0.0.1";
- if ( _params.count( "host" ) )
- _host = _params["host"].as<string>();
+ if ( _params.count( "help" ) ){
+ printHelp(cerr);
+ return 0;
+ }
- if ( _host.find( "," ) == string::npos ){
- DBClientConnection * c = new DBClientConnection();
- _conn = c;
+ if ( _params.count( "verbose" ) ) {
+ logLevel = 1;
+ }
- string errmsg;
- if ( ! c->connect( _host , errmsg ) ){
- cerr << "couldn't connect to [" << _host << "] " << errmsg << endl;
- return -1;
+ for (string s = "vv"; s.length() <= 10; s.append("v")) {
+ if (_params.count(s)) {
+ logLevel = s.length();
}
}
- else {
- log(1) << "using pairing" << endl;
- DBClientPaired * c = new DBClientPaired();
- _paired = true;
- _conn = c;
- if ( ! c->connect( _host ) ){
- cerr << "couldn't connect to paired server: " << _host << endl;
+ bool useDirectClient = hasParam( "dbpath" );
+
+ if ( ! useDirectClient ) {
+ _host = "127.0.0.1";
+ if ( _params.count( "host" ) )
+ _host = _params["host"].as<string>();
+
+ if ( _host.find( "," ) == string::npos ){
+ DBClientConnection * c = new DBClientConnection();
+ _conn = c;
+
+ string errmsg;
+ if ( ! c->connect( _host , errmsg ) ){
+ cerr << "couldn't connect to [" << _host << "] " << errmsg << endl;
+ return -1;
+ }
+ }
+ else {
+ log(1) << "using pairing" << endl;
+ DBClientPaired * c = new DBClientPaired();
+ _paired = true;
+ _conn = c;
+
+ if ( ! c->connect( _host ) ){
+ cerr << "couldn't connect to paired server: " << _host << endl;
+ return -1;
+ }
+ }
+
+ cerr << "connected to: " << _host << endl;
+ }
+ else {
+ if ( _params.count( "directoryperdb" ) ) {
+ directoryperdb = true;
+ }
+ Client::initThread("tools");
+ _conn = new DBDirectClient();
+ _host = "DIRECT";
+ static string myDbpath = getParam( "dbpath" );
+ dbpath = myDbpath.c_str();
+ try {
+ acquirePathLock();
+ }
+ catch ( DBException& e ){
+ cerr << endl << "If you are running a mongod on the same "
+ "path you should connect to that instead of direct data "
+ "file access" << endl << endl;
+ dbexit( EXIT_CLEAN );
return -1;
}
+
+ theFileAllocator().start();
}
- cerr << "connected to: " << _host << endl;
- }
- else {
- Client::initThread("tools");
- _conn = new DBDirectClient();
- _host = "DIRECT";
- static string myDbpath = getParam( "dbpath" );
- mongo::dbpath = myDbpath.c_str();
- mongo::acquirePathLock();
- theFileAllocator().start();
- }
+ if ( _params.count( "db" ) )
+ _db = _params["db"].as<string>();
- if ( _params.count( "db" ) )
- _db = _params["db"].as<string>();
+ if ( _params.count( "collection" ) )
+ _coll = _params["collection"].as<string>();
- if ( _params.count( "collection" ) )
- _coll = _params["collection"].as<string>();
+ if ( _params.count( "username" ) )
+ _username = _params["username"].as<string>();
- if ( _params.count( "username" ) )
- _username = _params["username"].as<string>();
+ if ( _params.count( "password" ) )
+ _password = _params["password"].as<string>();
- if ( _params.count( "password" ) )
- _password = _params["password"].as<string>();
+ int ret = -1;
+ try {
+ ret = run();
+ }
+ catch ( DBException& e ){
+ cerr << "assertion: " << e.toString() << endl;
+ ret = -1;
+ }
+
+ if ( currentClient.get() )
+ currentClient->shutdown();
- int ret = -1;
- try {
- ret = run();
- }
- catch ( DBException& e ){
- cerr << "assertion: " << e.toString() << endl;
- ret = -1;
+ if ( useDirectClient )
+ dbexit( EXIT_CLEAN );
+ return ret;
}
-
- if ( currentClient.get() )
- currentClient->shutdown();
-
- return ret;
-}
-mongo::DBClientBase& mongo::Tool::conn( bool slaveIfPaired ){
- if ( _paired && slaveIfPaired )
- return ((DBClientPaired*)_conn)->slaveConn();
- return *_conn;
-}
+ DBClientBase& Tool::conn( bool slaveIfPaired ){
+ if ( _paired && slaveIfPaired )
+ return ((DBClientPaired*)_conn)->slaveConn();
+ return *_conn;
+ }
-void mongo::Tool::addFieldOptions(){
- add_options()
- ("fields,f" , po::value<string>() , "comma seperated list of field names e.g. -f name,age" )
- ("fieldFile" , po::value<string>() , "file with fields names - 1 per line" )
- ;
-}
+ void Tool::addFieldOptions(){
+ add_options()
+        ("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
+        ("fieldFile" , po::value<string>() , "file with field names - 1 per line" )
+ ;
+ }
-void mongo::Tool::needFields(){
+ void Tool::needFields(){
- if ( hasParam( "fields" ) ){
- BSONObjBuilder b;
+ if ( hasParam( "fields" ) ){
+ BSONObjBuilder b;
- string fields_arg = getParam("fields");
- pcrecpp::StringPiece input(fields_arg);
+ string fields_arg = getParam("fields");
+ pcrecpp::StringPiece input(fields_arg);
- string f;
- pcrecpp::RE re("([\\w\\.]+),?" );
- while ( re.Consume( &input, &f ) ){
- _fields.push_back( f );
- b.append( f.c_str() , 1 );
- }
+ string f;
+ pcrecpp::RE re("([\\w\\.\\s]+),?" );
+ while ( re.Consume( &input, &f ) ){
+ _fields.push_back( f );
+ b.append( f.c_str() , 1 );
+ }
- _fieldsObj = b.obj();
- return;
- }
+ _fieldsObj = b.obj();
+ return;
+ }
- if ( hasParam( "fieldFile" ) ){
- string fn = getParam( "fieldFile" );
- if ( ! exists( fn ) )
- throw UserException( 9999 , ((string)"file: " + fn ) + " doesn't exist" );
-
- const int BUF_SIZE = 1024;
- char line[ 1024 + 128];
- ifstream file( fn.c_str() );
-
- BSONObjBuilder b;
- while ( file.rdstate() == ios_base::goodbit ){
- file.getline( line , BUF_SIZE );
- const char * cur = line;
- while ( isspace( cur[0] ) ) cur++;
- if ( strlen( cur ) == 0 )
- continue;
-
- _fields.push_back( cur );
- b.append( cur , 1 );
+ if ( hasParam( "fieldFile" ) ){
+ string fn = getParam( "fieldFile" );
+ if ( ! exists( fn ) )
+ throw UserException( 9999 , ((string)"file: " + fn ) + " doesn't exist" );
+
+ const int BUF_SIZE = 1024;
+ char line[ 1024 + 128];
+ ifstream file( fn.c_str() );
+
+ BSONObjBuilder b;
+ while ( file.rdstate() == ios_base::goodbit ){
+ file.getline( line , BUF_SIZE );
+ const char * cur = line;
+ while ( isspace( cur[0] ) ) cur++;
+ if ( strlen( cur ) == 0 )
+ continue;
+
+ _fields.push_back( cur );
+ b.append( cur , 1 );
+ }
+ _fieldsObj = b.obj();
+ return;
}
- _fieldsObj = b.obj();
- return;
+
+ throw UserException( 9998 , "you need to specify fields" );
}
- throw UserException( 9998 , "you need to specify fields" );
-}
+ void Tool::auth( string dbname ){
+ if ( ! dbname.size() )
+ dbname = _db;
+
+ if ( ! ( _username.size() || _password.size() ) )
+ return;
-void mongo::Tool::auth( string dbname ){
- if ( ! dbname.size() )
- dbname = _db;
+ string errmsg;
+ if ( _conn->auth( dbname , _username , _password , errmsg ) )
+ return;
- if ( ! ( _username.size() || _password.size() ) )
- return;
+ // try against the admin db
+ if ( _conn->auth( "admin" , _username , _password , errmsg ) )
+ return;
- string errmsg;
- if ( _conn->auth( dbname , _username , _password , errmsg ) )
- return;
+ throw UserException( 9997 , (string)"auth failed: " + errmsg );
+ }
- // try against the admin db
- string err2;
- if ( _conn->auth( "admin" , _username , _password , errmsg ) )
- return;
- throw mongo::UserException( 9997 , (string)"auth failed: " + errmsg );
+ void setupSignals(){}
}
diff --git a/tools/tool.h b/tools/tool.h
index 18996ec..330fc2d 100644
--- a/tools/tool.h
+++ b/tools/tool.h
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
// Tool.h
#pragma once
@@ -19,7 +35,7 @@ namespace mongo {
class Tool {
public:
- Tool( string name , string defaultDB="test" , string defaultCollection="");
+ Tool( string name , bool localDBAllowed=true, string defaultDB="test" , string defaultCollection="");
virtual ~Tool();
int main( int argc , char ** argv );
@@ -39,6 +55,11 @@ namespace mongo {
return _params[name.c_str()].as<string>();
return def;
}
+ int getParam( string name , int def ){
+ if ( _params.count( name ) )
+ return _params[name.c_str()].as<int>();
+ return def;
+ }
bool hasParam( string name ){
return _params.count( name );
}
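Building a tool against this interface follows the same shape as stat.cpp above; a minimal hypothetical subclass (the tool name and its option are invented for illustration):

    class Hello : public Tool {
    public:
        Hello() : Tool( "hello" , false , "test" ){   // localDBAllowed=false: no --dbpath option
            add_options()
                ("greeting" , po::value<string>()->default_value("hi") , "what to print" )
                ;
        }
        int run(){
            cout << getParam( "greeting" , "hi" ) << endl;
            return 0;
        }
    };

    int main( int argc , char ** argv ){
        Hello h;
        return h.main( argc , argv );   // parses options, connects, then calls run()
    }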
diff --git a/util/allocator.h b/util/allocator.h
index af8032c..90dbf24 100644
--- a/util/allocator.h
+++ b/util/allocator.h
@@ -34,16 +34,4 @@ namespace mongo {
#define malloc mongo::ourmalloc
#define realloc mongo::ourrealloc
-#if defined(_WIN32)
- inline void our_debug_free(void *p) {
-#if 0
- // this is not safe if you malloc < 4 bytes so we don't use anymore
- unsigned *u = (unsigned *) p;
- u[0] = 0xEEEEEEEE;
-#endif
- free(p);
- }
-#define free our_debug_free
-#endif
-
} // namespace mongo
diff --git a/util/array.h b/util/array.h
new file mode 100644
index 0000000..827c00e
--- /dev/null
+++ b/util/array.h
@@ -0,0 +1,104 @@
+// array.h
+
+namespace mongo {
+
+ template<typename T>
+ class FastArray {
+ public:
+ FastArray( int capacity=10000 )
+ : _capacity( capacity ) , _size(0) , _end(this,capacity){
+ _data = new T[capacity];
+ }
+
+ ~FastArray(){
+ delete[] _data;
+ }
+
+ void clear(){
+ _size = 0;
+ }
+
+ T& operator[]( int x ){
+ assert( x >= 0 && x < _capacity );
+ return _data[x];
+ }
+
+ T& getNext(){
+ return _data[_size++];
+ }
+
+ void push_back( const T& t ){
+ _data[_size++] = t;
+ }
+
+ void sort( int (*comp)(const void *, const void *) ){
+ qsort( _data , _size , sizeof(T) , comp );
+ }
+
+ int size(){
+ return _size;
+ }
+
+ bool hasSpace(){
+ return _size < _capacity;
+ }
+ class iterator {
+ public:
+ iterator(){
+ _it = 0;
+ _pos = 0;
+ }
+
+ iterator( FastArray * it , int pos=0 ){
+ _it = it;
+ _pos = pos;
+ }
+
+ bool operator==(const iterator& other ) const {
+ return _pos == other._pos;
+ }
+
+ bool operator!=(const iterator& other ) const {
+ return _pos != other._pos;
+ }
+
+ void operator++(){
+ _pos++;
+ }
+
+ T& operator*(){
+ return _it->_data[_pos];
+ }
+
+ operator string() const {
+ stringstream ss;
+ ss << _pos;
+ return ss.str();
+ }
+ private:
+ FastArray * _it;
+ int _pos;
+
+ friend class FastArray;
+ };
+
+
+ iterator begin(){
+ return iterator(this);
+ }
+
+ iterator end(){
+ _end._pos = _size;
+ return _end;
+ }
+
+
+ private:
+ int _capacity;
+ int _size;
+
+ iterator _end;
+
+ T * _data;
+ };
+}
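A usage sketch for FastArray. Capacity is fixed at construction and push_back() does not bounds-check, so callers are expected to consult hasSpace() first:

    int intcmp( const void * a , const void * b ){
        return *(const int*)a - *(const int*)b;
    }

    void demo(){
        mongo::FastArray<int> arr( 4 );
        arr.push_back( 3 );
        arr.push_back( 1 );
        if ( arr.hasSpace() )
            arr.push_back( 2 );
        arr.sort( intcmp );       // qsort on the raw buffer
        for ( mongo::FastArray<int>::iterator i = arr.begin(); i != arr.end(); ++i )
            cout << *i << endl;   // prints 1 2 3
    }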
diff --git a/util/assert_util.cpp b/util/assert_util.cpp
index d1d85b2..8c8477a 100644
--- a/util/assert_util.cpp
+++ b/util/assert_util.cpp
@@ -22,6 +22,26 @@
namespace mongo {
+ AssertionCount assertionCount;
+
+ AssertionCount::AssertionCount()
+ : regular(0),warning(0),msg(0),user(0),rollovers(0){
+ }
+
+ void AssertionCount::rollover(){
+ rollovers++;
+ regular = 0;
+ warning = 0;
+ msg = 0;
+ user = 0;
+ }
+
+ void AssertionCount::condrollover( int newvalue ){
+ static int max = (int)pow( 2.0 , 30 );
+ if ( newvalue >= max )
+ rollover();
+ }
+
string getDbContext();
Assertion lastAssert[4];
@@ -32,9 +52,11 @@ namespace mongo {
sayDbContext();
raiseError(0,msg && *msg ? msg : "wassertion failure");
lastAssert[1].set(msg, getDbContext().c_str(), file, line);
+ assertionCount.condrollover( ++assertionCount.warning );
}
void asserted(const char *msg, const char *file, unsigned line) {
+ assertionCount.condrollover( ++assertionCount.regular );
problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
sayDbContext();
raiseError(0,msg && *msg ? msg : "assertion failure");
@@ -54,6 +76,7 @@ namespace mongo {
int uacount = 0;
void uasserted(int msgid, const char *msg) {
+ assertionCount.condrollover( ++assertionCount.user );
if ( ++uacount < 100 )
log() << "User Exception " << msgid << ":" << msg << endl;
else
@@ -64,6 +87,7 @@ namespace mongo {
}
void msgasserted(int msgid, const char *msg) {
+ assertionCount.condrollover( ++assertionCount.warning );
log() << "Assertion: " << msgid << ":" << msg << endl;
lastAssert[2].set(msg, getDbContext().c_str(), "", 0);
raiseError(msgid,msg && *msg ? msg : "massert failure");
@@ -72,13 +96,22 @@ namespace mongo {
throw MsgAssertionException(msgid, msg);
}
- boost::mutex *Assertion::_mutex = new boost::mutex();
+ void streamNotGood( int code , string msg , std::ios& myios ){
+ stringstream ss;
+        // errno might not be meaningful for streams on all systems;
+        // if a platform needs different handling, deal with it here
+        ss << msg << " stream invalid: " << OUTPUT_ERRNO;
+ throw UserException( code , ss.str() );
+ }
+
+
+ mongo::mutex *Assertion::_mutex = new mongo::mutex();
string Assertion::toString() {
if( _mutex == 0 )
return "";
- boostlock lk(*_mutex);
+ scoped_lock lk(*_mutex);
if ( !isSet() )
return "";
@@ -166,5 +199,14 @@ namespace mongo {
void rotateLogs( int signal ){
loggingManager.rotate();
}
+
+ string errnostring( const char * prefix ){
+ stringstream ss;
+ if ( prefix )
+ ss << prefix << ": ";
+ ss << OUTPUT_ERRNO;
+ return ss.str();
+ }
+
}
diff --git a/util/assert_util.h b/util/assert_util.h
index ccb60a0..bae3a55 100644
--- a/util/assert_util.h
+++ b/util/assert_util.h
@@ -32,7 +32,7 @@ namespace mongo {
when = 0;
}
private:
- static boost::mutex *_mutex;
+ static mongo::mutex *_mutex;
char msg[128];
char context[128];
const char *file;
@@ -44,7 +44,7 @@ namespace mongo {
/* asserted during global variable initialization */
return;
}
- boostlock lk(*_mutex);
+ scoped_lock lk(*_mutex);
strncpy(msg, m, 127);
strncpy(context, ctxt, 127);
file = f;
@@ -67,6 +67,21 @@ namespace mongo {
/* last assert of diff types: regular, wassert, msgassert, uassert: */
extern Assertion lastAssert[4];
+ class AssertionCount {
+ public:
+ AssertionCount();
+ void rollover();
+ void condrollover( int newValue );
+
+ int regular;
+ int warning;
+ int msg;
+ int user;
+ int rollovers;
+ };
+
+ extern AssertionCount assertionCount;
+
class DBException : public std::exception {
public:
virtual const char* what() const throw() = 0;
@@ -91,6 +106,11 @@ namespace mongo {
}
virtual int getCode(){ return code; }
virtual const char* what() const throw() { return msg.c_str(); }
+
+ /* true if an interrupted exception - see KillCurrentOp */
+ bool interrupted() {
+ return code == 11600 || code == 11601;
+ }
};
/* UserExceptions are valid errors that a user can cause, like out of disk space or duplicate key */
@@ -173,6 +193,10 @@ namespace mongo {
#define ASSERT_ID_DUPKEY 11000
+ void streamNotGood( int code , string msg , std::ios& myios );
+
+#define ASSERT_STREAM_GOOD(msgid,msg,stream) (void)( (!!((stream).good())) || (mongo::streamNotGood(msgid, msg, stream), 0) )
+
} // namespace mongo
#define BOOST_CHECK_EXCEPTION( expression ) \
@@ -184,3 +208,12 @@ namespace mongo {
} catch ( ... ) { \
massert( 10437 , "unknown boost failed" , false ); \
}
+
+#define DESTRUCTOR_GUARD( expression ) \
+ try { \
+ expression; \
+ } catch ( const std::exception &e ) { \
+ problem() << "caught exception (" << e.what() << ") in destructor (" << __FUNCTION__ << ")" << endl; \
+ } catch ( ... ) { \
+ problem() << "caught unknown exception in destructor (" << __FUNCTION__ << ")" << endl; \
+ }
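Usage sketches for the two new macros (the Flusher class and its flushAll() call are hypothetical):

    ofstream out( "data.bson" , ios_base::out | ios_base::binary );
    ASSERT_STREAM_GOOD( 10262 , "couldn't open file" , out );   // as used in tools/dump.cpp above

    class Flusher {
    public:
        ~Flusher(){
            DESTRUCTOR_GUARD( flushAll() );   // destructors must not throw; log and continue instead
        }
    };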
diff --git a/util/atomic_int.h b/util/atomic_int.h
new file mode 100644
index 0000000..de50560
--- /dev/null
+++ b/util/atomic_int.h
@@ -0,0 +1,100 @@
+// atomic_int.h
+// atomic wrapper for unsigned
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#if defined(_WIN32)
+# include <windows.h>
+#endif
+
+namespace mongo{
+
+
+ struct AtomicUInt{
+ AtomicUInt() : x(0) {}
+ AtomicUInt(unsigned z) : x(z) { }
+ volatile unsigned x;
+ operator unsigned() const {
+ return x;
+ }
+ inline AtomicUInt operator++(); // ++prefix
+ inline AtomicUInt operator++(int);// postfix++
+ inline AtomicUInt operator--(); // --prefix
+ inline AtomicUInt operator--(int); // postfix--
+ };
+
+#if defined(_WIN32)
+ AtomicUInt AtomicUInt::operator++(){
+ // InterlockedIncrement returns the new value
+ return InterlockedIncrement((volatile long*)&x); //long is 32bits in Win64
+ }
+ AtomicUInt AtomicUInt::operator++(int){
+ return InterlockedIncrement((volatile long*)&x)-1;
+ }
+ AtomicUInt AtomicUInt::operator--(){
+ return InterlockedDecrement((volatile long*)&x);
+ }
+ AtomicUInt AtomicUInt::operator--(int){
+ return InterlockedDecrement((volatile long*)&x)+1;
+ }
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ // this is in GCC >= 4.1
+ AtomicUInt AtomicUInt::operator++(){
+ return __sync_add_and_fetch(&x, 1);
+ }
+ AtomicUInt AtomicUInt::operator++(int){
+ return __sync_fetch_and_add(&x, 1);
+ }
+ AtomicUInt AtomicUInt::operator--(){
+ return __sync_add_and_fetch(&x, -1);
+ }
+ AtomicUInt AtomicUInt::operator--(int){
+ return __sync_fetch_and_add(&x, -1);
+ }
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+ // from boost 1.39 interprocess/detail/atomic.hpp
+
+ inline unsigned atomic_int_helper(volatile unsigned *x, int val){
+ int r;
+ asm volatile
+ (
+ "lock\n\t"
+ "xadd %1, %0":
+ "+m"( *x ), "=r"( r ): // outputs (%0, %1)
+ "1"( val ): // inputs (%2 == %1)
+ "memory", "cc" // clobbers
+ );
+ return r;
+ }
+ AtomicUInt AtomicUInt::operator++(){
+ return atomic_int_helper(&x, 1)+1;
+ }
+ AtomicUInt AtomicUInt::operator++(int){
+ return atomic_int_helper(&x, 1);
+ }
+ AtomicUInt AtomicUInt::operator--(){
+ return atomic_int_helper(&x, -1)-1;
+ }
+ AtomicUInt AtomicUInt::operator--(int){
+ return atomic_int_helper(&x, -1);
+ }
+#else
+# error "unsupported compiler or platform"
+#endif
+
+} // namespace mongo
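AtomicUInt reads like a plain unsigned, but its increments and decrements are atomic; prefix forms return the new value and postfix forms the old, matching built-in semantics. A small sketch:

    mongo::AtomicUInt requests;

    void onRequest(){
        requests++;        // safe from many threads concurrently
    }

    unsigned current(){
        return requests;   // plain load via operator unsigned()
    }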
diff --git a/util/background.cpp b/util/background.cpp
index ac3a48c..4125315 100644
--- a/util/background.cpp
+++ b/util/background.cpp
@@ -22,7 +22,7 @@
namespace mongo {
BackgroundJob *BackgroundJob::grab = 0;
- boost::mutex &BackgroundJob::mutex = *( new boost::mutex );
+ mongo::mutex BackgroundJob::mutex;
/* static */
void BackgroundJob::thr() {
@@ -38,7 +38,7 @@ namespace mongo {
}
BackgroundJob& BackgroundJob::go() {
- boostlock bl(mutex);
+ scoped_lock bl(mutex);
assert( grab == 0 );
grab = this;
boost::thread t(thr);
diff --git a/util/background.h b/util/background.h
index ff044cb..c95a5bd 100644
--- a/util/background.h
+++ b/util/background.h
@@ -27,7 +27,6 @@ namespace mongo {
has finished. Thus one pattern of use is to embed a backgroundjob
in your object and reuse it (or same thing with inheritance).
*/
-
class BackgroundJob {
protected:
/* define this to do your work! */
@@ -65,7 +64,7 @@ namespace mongo {
private:
static BackgroundJob *grab;
- static boost::mutex &mutex;
+ static mongo::mutex mutex;
static void thr();
volatile State state;
};
diff --git a/util/base64.cpp b/util/base64.cpp
index cf2f485..8d9d544 100644
--- a/util/base64.cpp
+++ b/util/base64.cpp
@@ -17,48 +17,13 @@
*/
#include "stdafx.h"
+#include "base64.h"
namespace mongo {
namespace base64 {
- class Alphabet {
- public:
- Alphabet(){
- encode = (unsigned char*)
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz"
- "0123456789"
- "+/";
-
- decode = (unsigned char*)malloc(257);
- memset( decode , 0 , 256 );
- for ( int i=0; i<64; i++ ){
- decode[ encode[i] ] = i;
- }
-
- test();
- }
- ~Alphabet(){
- free( decode );
- }
+ Alphabet alphabet;
- void test(){
- assert( strlen( (char*)encode ) == 64 );
- for ( int i=0; i<26; i++ )
- assert( encode[i] == toupper( encode[i+26] ) );
- }
-
- char e( int x ){
- return encode[x&0x3f];
- }
-
- private:
- const unsigned char * encode;
- public:
- unsigned char * decode;
- } alphabet;
-
-
void encode( stringstream& ss , const char * data , int size ){
for ( int i=0; i<size; i+=3 ){
int left = size - i;
diff --git a/util/base64.h b/util/base64.h
index 62caceb..c113eed 100644
--- a/util/base64.h
+++ b/util/base64.h
@@ -15,10 +15,47 @@
* limitations under the License.
*/
+#pragma once
namespace mongo {
namespace base64 {
+ class Alphabet {
+ public:
+ Alphabet()
+ : encode((unsigned char*)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789"
+ "+/")
+ , decode(new unsigned char[257])
+ {
+ memset( decode.get() , 0 , 256 );
+ for ( int i=0; i<64; i++ ){
+ decode[ encode[i] ] = i;
+ }
+
+ test();
+ }
+ void test(){
+ assert( strlen( (char*)encode ) == 64 );
+ for ( int i=0; i<26; i++ )
+ assert( encode[i] == toupper( encode[i+26] ) );
+ }
+
+ char e( int x ){
+ return encode[x&0x3f];
+ }
+
+ private:
+ const unsigned char * encode;
+ public:
+ boost::scoped_array<unsigned char> decode;
+ };
+
+ extern Alphabet alphabet;
+
+
void encode( stringstream& ss , const char * data , int size );
string encode( const char * data , int size );
string encode( const string& s );
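A quick example against the declarations above (the output assumes standard '=' padding, which the encoder's three-byte grouping implies):

    string e = mongo::base64::encode( "mongo" );   // "bW9uZ28="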
diff --git a/util/builder.h b/util/builder.h
index 5046b72..f9d3514 100644
--- a/util/builder.h
+++ b/util/builder.h
@@ -90,7 +90,7 @@ namespace mongo {
append<double>(j);
}
- void append(const void *src, int len) {
+ void append(const void *src, size_t len) {
memcpy(grow(len), src, len);
}
@@ -102,6 +102,10 @@ namespace mongo {
append( (void *)str.c_str(), str.length() + 1 );
}
+ void append( int val , int padding ){
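+            // stub: presumably intended to append val left-padded to 'padding' digits; currently a no-op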
+
+ }
+
int len() const {
return l;
}
@@ -197,7 +201,7 @@ namespace mongo {
}
string str(){
- return string(_buf.data,0,_buf.l);
+ return string(_buf.data, _buf.l);
}
private:
diff --git a/util/debug_util.cpp b/util/debug_util.cpp
index 283053f..9c2f5dc 100644
--- a/util/debug_util.cpp
+++ b/util/debug_util.cpp
@@ -21,7 +21,7 @@
namespace mongo {
-#if defined(_DEBUG) && !defined(_WIN32)
+#if defined(USE_GDBSERVER)
/* Magic gdb trampoline
* Do not call directly! call setupSIGTRAPforGDB()
* Assumptions:
diff --git a/util/file_allocator.h b/util/file_allocator.h
index 73159d3..93b2b1c 100644
--- a/util/file_allocator.h
+++ b/util/file_allocator.h
@@ -54,7 +54,7 @@ namespace mongo {
on windows anyway as we don't have to pre-zero the file there.
*/
#if !defined(_WIN32)
- boostlock lk( pendingMutex_ );
+ scoped_lock lk( pendingMutex_ );
if ( failed_ )
return;
long oldSize = prevSize( name );
@@ -71,7 +71,7 @@ namespace mongo {
// updated to match existing file size.
void allocateAsap( const string &name, long &size ) {
#if !defined(_WIN32)
- boostlock lk( pendingMutex_ );
+ scoped_lock lk( pendingMutex_ );
long oldSize = prevSize( name );
if ( oldSize != -1 ) {
size = oldSize;
@@ -91,7 +91,7 @@ namespace mongo {
pendingUpdated_.notify_all();
while( inProgress( name ) ) {
checkFailure();
- pendingUpdated_.wait( lk );
+ pendingUpdated_.wait( lk.boost() );
}
#endif
}
@@ -100,9 +100,9 @@ namespace mongo {
#if !defined(_WIN32)
if ( failed_ )
return;
- boostlock lk( pendingMutex_ );
+ scoped_lock lk( pendingMutex_ );
while( pending_.size() != 0 )
- pendingUpdated_.wait( lk );
+ pendingUpdated_.wait( lk.boost() );
#endif
}
@@ -130,7 +130,7 @@ namespace mongo {
return false;
}
- mutable boost::mutex pendingMutex_;
+ mutable mongo::mutex pendingMutex_;
mutable boost::condition pendingUpdated_;
list< string > pending_;
mutable map< string, long > pendingSize_;
@@ -142,21 +142,22 @@ namespace mongo {
void operator()() {
while( 1 ) {
{
- boostlock lk( a_.pendingMutex_ );
+ scoped_lock lk( a_.pendingMutex_ );
if ( a_.pending_.size() == 0 )
- a_.pendingUpdated_.wait( lk );
+ a_.pendingUpdated_.wait( lk.boost() );
}
while( 1 ) {
string name;
long size;
{
- boostlock lk( a_.pendingMutex_ );
+ scoped_lock lk( a_.pendingMutex_ );
if ( a_.pending_.size() == 0 )
break;
name = a_.pending_.front();
size = a_.pendingSize_[ name ];
}
try {
+ log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
long fd = open(name.c_str(), O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
if ( fd <= 0 ) {
stringstream ss;
@@ -180,19 +181,19 @@ namespace mongo {
massert( 10442 , "Unable to allocate file of desired size",
1 == write(fd, "", 1) );
lseek(fd, 0, SEEK_SET);
- log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
Timer t;
long z = 256 * 1024;
char buf[z];
memset(buf, 0, z);
long left = size;
- while ( 1 ) {
- if ( left <= z ) {
- massert( 10443 , "write failed", left == write(fd, buf, left) );
- break;
- }
- massert( 10444 , "write failed", z == write(fd, buf, z) );
- left -= z;
+ while ( left > 0 ) {
+ long towrite = left;
+ if ( towrite > z )
+ towrite = z;
+
+ int written = write( fd , buf , towrite );
+ massert( 10443 , errnostring("write failed" ), written > 0 );
+ left -= written;
}
log() << "done allocating datafile " << name << ", size: " << size/1024/1024 << "MB, took " << ((double)t.millis())/1000.0 << " secs" << endl;
}
@@ -205,7 +206,7 @@ namespace mongo {
BOOST_CHECK_EXCEPTION( boost::filesystem::remove( name ) );
} catch ( ... ) {
}
- boostlock lk( a_.pendingMutex_ );
+ scoped_lock lk( a_.pendingMutex_ );
a_.failed_ = true;
// not erasing from pending
a_.pendingUpdated_.notify_all();
@@ -213,7 +214,7 @@ namespace mongo {
}
{
- boostlock lk( a_.pendingMutex_ );
+ scoped_lock lk( a_.pendingMutex_ );
a_.pendingSize_.erase( name );
a_.pending_.pop_front();
a_.pendingUpdated_.notify_all();
diff --git a/util/goodies.h b/util/goodies.h
index 7eebc0a..4641941 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -24,7 +24,7 @@
namespace mongo {
-#if !defined(_WIN32) && !defined(NOEXECINFO)
+#if !defined(_WIN32) && !defined(NOEXECINFO) && !defined(__freebsd__) && !defined(__sun__)
} // namespace mongo
@@ -120,36 +120,11 @@ namespace mongo {
x = 0;
}
WrappingInt(unsigned z) : x(z) { }
- volatile unsigned x;
+ unsigned x;
operator unsigned() const {
return x;
}
- // returns original value (like x++)
- WrappingInt atomicIncrement(){
-#if defined(_WIN32)
- // InterlockedIncrement returns the new value
- return InterlockedIncrement((volatile long*)&x)-1; //long is 32bits in Win64
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- // this is in GCC >= 4.1
- return __sync_fetch_and_add(&x, 1);
-#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
- // from boost 1.39 interprocess/detail/atomic.hpp
- int r;
- int val = 1;
- asm volatile
- (
- "lock\n\t"
- "xadd %1, %0":
- "+m"( x ), "=r"( r ): // outputs (%0, %1)
- "1"( val ): // inputs (%2 == %1)
- "memory", "cc" // clobbers
- );
- return r;
-#else
-# error "unsupported compiler or platform"
-#endif
- }
static int diff(unsigned a, unsigned b) {
return a-b;
@@ -179,6 +154,23 @@ namespace mongo {
buf[24] = 0; // don't want the \n
}
+
+ inline void time_t_to_Struct(time_t t, struct tm * buf , bool local = false ) {
+#if defined(_WIN32)
+ if ( local )
+ localtime_s( buf , &t );
+ else
+ gmtime_s(buf, &t);
+#else
+ if ( local )
+ localtime_r(&t, buf);
+ else
+ gmtime_r(&t, buf);
+#endif
+ }
+
+
+
#define asctime _asctime_not_threadsafe_
#define gmtime _gmtime_not_threadsafe_
#define localtime _localtime_not_threadsafe_
@@ -278,8 +270,42 @@ namespace mongo {
return secs*1000000 + t;
}
using namespace boost;
- typedef boost::mutex::scoped_lock boostlock;
- typedef boost::recursive_mutex::scoped_lock recursive_boostlock;
+
+ extern bool __destroyingStatics;
+
+ // If you create a local static instance of this class, that instance will be destroyed
+ // before all global static objects are destroyed, so __destroyingStatics will be set
+ // to true before the global static variables are destroyed.
+ class StaticObserver : boost::noncopyable {
+ public:
+ ~StaticObserver() { __destroyingStatics = true; }
+ };
+
+ // On pthread systems, it is an error to destroy a mutex while held. Static global
+ // mutexes may be held upon shutdown in our implementation, and this way we avoid
+ // destroying them.
+ class mutex : boost::noncopyable {
+ public:
+ mutex() { new (_buf) boost::mutex(); }
+ ~mutex() {
+ if( !__destroyingStatics ) {
+ boost().boost::mutex::~mutex();
+ }
+ }
+ class scoped_lock : boost::noncopyable {
+ public:
+ scoped_lock( mongo::mutex &m ) : _l( m.boost() ) {}
+ boost::mutex::scoped_lock &boost() { return _l; }
+ private:
+ boost::mutex::scoped_lock _l;
+ };
+ private:
+ boost::mutex &boost() { return *( boost::mutex * )( _buf ); }
+ char _buf[ sizeof( boost::mutex ) ];
+ };
+
+ typedef mongo::mutex::scoped_lock scoped_lock;
+ typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock;
// simple scoped timer
class Timer {
@@ -318,7 +344,7 @@ namespace mongo {
class DebugMutex : boost::noncopyable {
friend class lock;
- boost::mutex m;
+ mongo::mutex m;
int locked;
public:
DebugMutex() : locked(0) { }
@@ -327,17 +353,17 @@ namespace mongo {
*/
-//typedef boostlock lock;
+//typedef scoped_lock lock;
inline bool startsWith(const char *str, const char *prefix) {
- unsigned l = strlen(prefix);
+ size_t l = strlen(prefix);
if ( strlen(str) < l ) return false;
return strncmp(str, prefix, l) == 0;
}
inline bool endsWith(const char *p, const char *suffix) {
- int a = strlen(p);
- int b = strlen(suffix);
+ size_t a = strlen(p);
+ size_t b = strlen(suffix);
if ( b > a ) return false;
return strcmp(p + a - b, suffix) == 0;
}
@@ -418,12 +444,39 @@ namespace mongo {
class ProgressMeter {
public:
- ProgressMeter( long long total , int secondsBetween = 3 , int checkInterval = 100 )
- : _total( total ) , _secondsBetween( secondsBetween ) , _checkInterval( checkInterval ) ,
- _done(0) , _hits(0) , _lastTime( (int) time(0) ){
+ ProgressMeter( long long total , int secondsBetween = 3 , int checkInterval = 100 ){
+ reset( total , secondsBetween , checkInterval );
+ }
+
+ ProgressMeter(){
+ _active = 0;
+ }
+
+ void reset( long long total , int secondsBetween = 3 , int checkInterval = 100 ){
+ _total = total;
+ _secondsBetween = secondsBetween;
+ _checkInterval = checkInterval;
+
+ _done = 0;
+ _hits = 0;
+ _lastTime = (int)time(0);
+
+ _active = 1;
+ }
+
+ void finished(){
+ _active = 0;
+ }
+
+ bool isActive(){
+ return _active;
}
bool hit( int n = 1 ){
+ if ( ! _active ){
+            cout << "warning: hit on an inactive ProgressMeter" << endl;
+ }
+
_done += n;
_hits++;
if ( _hits % _checkInterval )
@@ -449,7 +502,16 @@ namespace mongo {
return _hits;
}
+ string toString() const {
+ if ( ! _active )
+ return "";
+ stringstream buf;
+ buf << _done << "/" << _total << " " << (_done*100)/_total << "%";
+ return buf.str();
+ }
private:
+
+ bool _active;
long long _total;
int _secondsBetween;
@@ -468,7 +530,7 @@ namespace mongo {
}
bool tryAcquire(){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
if ( _num <= 0 ){
if ( _num < 0 ){
cerr << "DISASTER! in TicketHolder" << endl;
@@ -480,12 +542,12 @@ namespace mongo {
}
void release(){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
_num++;
}
void resize( int newSize ){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
int used = _outof - _num;
if ( used > newSize ){
cout << "ERROR: can't resize since we're using (" << used << ") more than newSize(" << newSize << ")" << endl;
@@ -507,7 +569,7 @@ namespace mongo {
private:
int _outof;
int _num;
- boost::mutex _mutex;
+ mongo::mutex _mutex;
};
class TicketHolderReleaser {
@@ -523,4 +585,108 @@ namespace mongo {
TicketHolder * _holder;
};
+
+ /**
+     * a thread-safe string: you will never get a bad pointer,
+     * though the data may be munged
+ */
+ class ThreadSafeString {
+ public:
+ ThreadSafeString( size_t size=256 )
+        : _size( size ) , _buf( new char[size] ){ // honor the requested size instead of a hardcoded 256
+ memset( _buf , 0 , _size );
+ }
+
+ ThreadSafeString( const ThreadSafeString& other )
+ : _size( other._size ) , _buf( new char[_size] ){
+ strncpy( _buf , other._buf , _size );
+ }
+
+ ~ThreadSafeString(){
+ delete[] _buf;
+ _buf = 0;
+ }
+
+ operator string() const {
+ string s = _buf;
+ return s;
+ }
+
+ ThreadSafeString& operator=( const char * str ){
+ size_t s = strlen(str);
+ if ( s >= _size - 2 )
+ s = _size - 2;
+ strncpy( _buf , str , s );
+ _buf[s] = 0;
+ return *this;
+ }
+
+ bool operator==( const ThreadSafeString& other ) const {
+ return strcmp( _buf , other._buf ) == 0;
+ }
+
+ bool operator==( const char * str ) const {
+ return strcmp( _buf , str ) == 0;
+ }
+
+ bool operator!=( const char * str ) const {
+ return strcmp( _buf , str );
+ }
+
+ bool empty() const {
+ return _buf[0] == 0;
+ }
+
+ private:
+ size_t _size;
+ char * _buf;
+ };
+
+ ostream& operator<<( ostream &s, const ThreadSafeString &o );
+
+ inline bool isNumber( char c ) {
+ return c >= '0' && c <= '9';
+ }
+
+ // for convenience, '{' is greater than anything and stops number parsing
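+    // e.g. lexNumCmp( "a5" , "a10" ) < 0: the longer digit run in "a10" wins
+    //      even though '5' > '1' at the first differing digit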
+ inline int lexNumCmp( const char *s1, const char *s2 ) {
+ int nret = 0;
+ while( *s1 && *s2 ) {
+ bool p1 = ( *s1 == '{' );
+ bool p2 = ( *s2 == '{' );
+ if ( p1 && !p2 )
+ return 1;
+ if ( p2 && !p1 )
+ return -1;
+ bool n1 = isNumber( *s1 );
+ bool n2 = isNumber( *s2 );
+ if ( n1 && n2 ) {
+ if ( nret == 0 ) {
+ nret = *s1 > *s2 ? 1 : ( *s1 == *s2 ? 0 : -1 );
+ }
+ } else if ( n1 ) {
+ return 1;
+ } else if ( n2 ) {
+ return -1;
+ } else {
+ if ( nret ) {
+ return nret;
+ }
+ if ( *s1 > *s2 ) {
+ return 1;
+ } else if ( *s2 > *s1 ) {
+ return -1;
+ }
+ nret = 0;
+ }
+ ++s1; ++s2;
+ }
+ if ( *s1 ) {
+ return 1;
+ } else if ( *s2 ) {
+ return -1;
+ }
+ return nret;
+ }
+
} // namespace mongo
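A sketch of how StaticObserver and mongo::mutex combine at shutdown (names illustrative):

    mongo::mutex gLock;   // global; its underlying boost::mutex is deliberately
                          // leaked once static destruction has begun

    void touchShared(){
        mongo::scoped_lock lk( gLock );   // the RAII form substituted throughout this patch
        // ... mutate shared state ...
    }

    int main( int argc , char ** argv ){
        static StaticObserver observer;   // constructed after the globals, so destroyed
                                          // first: it flips __destroyingStatics to true
        // ...
        return 0;
    }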
diff --git a/util/hashtab.h b/util/hashtab.h
index 214c0ae..d46591c 100644
--- a/util/hashtab.h
+++ b/util/hashtab.h
@@ -149,7 +149,7 @@ namespace mongo {
typedef void (*IteratorCallback)( const Key& k , Type& v );
- void iterall( IteratorCallback callback ){
+ void iterAll( IteratorCallback callback ){
for ( int i=0; i<n; i++ ){
if ( ! nodes[i].inUse() )
continue;
diff --git a/util/hex.h b/util/hex.h
new file mode 100644
index 0000000..cef3e80
--- /dev/null
+++ b/util/hex.h
@@ -0,0 +1,35 @@
+// util/hex.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+ //can't use hex namespace because it conflicts with hex iostream function
+ inline int fromHex( char c ) {
+ if ( '0' <= c && c <= '9' )
+ return c - '0';
+ if ( 'a' <= c && c <= 'f' )
+ return c - 'a' + 10;
+ if ( 'A' <= c && c <= 'F' )
+ return c - 'A' + 10;
+ assert( false );
+ return 0xff;
+ }
+ inline char fromHex( const char *c ) {
+ return ( fromHex( c[ 0 ] ) << 4 ) | fromHex( c[ 1 ] );
+ }
+}
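Worked values for the helpers above:

    // fromHex( '7' )  == 7,   fromHex( 'b' ) == 11,   fromHex( 'B' ) == 11
    // fromHex( "4a" ) == ( 4 << 4 ) | 10 == 0x4a == 74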
diff --git a/util/httpclient.cpp b/util/httpclient.cpp
index 284bb63..08b6220 100644
--- a/util/httpclient.cpp
+++ b/util/httpclient.cpp
@@ -17,10 +17,25 @@
#include "stdafx.h"
#include "httpclient.h"
+#include "sock.h"
+#include "message.h"
+#include "builder.h"
namespace mongo {
- int HttpClient::get( string url , map<string,string>& headers, stringstream& data ){
+ //#define HD(x) cout << x << endl;
+#define HD(x)
+
+
+ int HttpClient::get( string url , Result * result ){
+ return _go( "GET" , url , 0 , result );
+ }
+
+ int HttpClient::post( string url , string data , Result * result ){
+ return _go( "POST" , url , data.c_str() , result );
+ }
+
+ int HttpClient::_go( const char * command , string url , const char * body , Result * result ){
uassert( 10271 , "invalid url" , url.find( "http://" ) == 0 );
url = url.substr( 7 );
@@ -34,28 +49,84 @@ namespace mongo {
path = url.substr( url.find( "/" ) );
}
+
+ HD( "host [" << host << "]" );
+ HD( "path [" << path << "]" );
+
+ string server = host;
int port = 80;
- uassert( 10272 , "non standard port not supported yet" , host.find( ":" ) == string::npos );
- cout << "host [" << host << "]" << endl;
- cout << "path [" << path << "]" << endl;
- cout << "port: " << port << endl;
+ string::size_type idx = host.find( ":" );
+ if ( idx != string::npos ){
+ server = host.substr( 0 , idx );
+ string t = host.substr( idx + 1 );
+ port = atoi( t.c_str() );
+ }
+
+ HD( "server [" << server << "]" );
+ HD( "port [" << port << "]" );
string req;
{
stringstream ss;
- ss << "GET " << path << " HTTP/1.1\r\n";
+ ss << command << " " << path << " HTTP/1.1\r\n";
ss << "Host: " << host << "\r\n";
ss << "Connection: Close\r\n";
ss << "User-Agent: mongodb http client\r\n";
+ if ( body ) {
+ ss << "Content-Length: " << strlen( body ) << "\r\n";
+ }
ss << "\r\n";
+ if ( body ) {
+ ss << body;
+ }
req = ss.str();
}
+
+ SockAddr addr( server.c_str() , port );
+ HD( "addr: " << addr.toString() );
+
+ MessagingPort p;
+ if ( ! p.connect( addr ) )
+ return -1;
+
+ {
+ const char * out = req.c_str();
+ int toSend = req.size();
+ while ( toSend > 0 ){
+ int did = p.send( out , toSend );
+ if ( did <= 0 )
+ return -1; // send error; avoid looping forever
+ toSend -= did;
+ out += did;
+ }
+ }
+
+ char buf[4097]; // one spare byte so a full 4096-byte read can still be terminated
+ int got = p.recv( buf , 4096 );
+ if ( got < 0 )
+ return -1;
+ buf[got] = 0;
+
+ int rc;
+ char version[32];
+ assert( sscanf( buf , "%s %d" , version , &rc ) == 2 );
+ HD( "rc: " << rc );
+
+ StringBuilder sb;
+ if ( result )
+ sb << buf;
+
+ while ( ( got = p.recv( buf , 4096 ) ) > 0 ){
+ buf[got] = 0; // terminate each chunk before streaming it
+ if ( result )
+ sb << buf;
+ }
- cout << req << endl;
+ if ( result ){
+ result->_code = rc;
+ result->_entireResponse = sb.str();
+ }
- return -1;
+ return rc;
}
+
+
}
diff --git a/util/httpclient.h b/util/httpclient.h
index 14f0d87..ef3e147 100644
--- a/util/httpclient.h
+++ b/util/httpclient.h
@@ -23,7 +23,33 @@ namespace mongo {
class HttpClient {
public:
- int get( string url , map<string,string>& headers, stringstream& data );
+
+ class Result {
+ public:
+ Result() : _code(-1){} // -1 until a response has been parsed
+
+ const string& getEntireResponse() const {
+ return _entireResponse;
+ }
+ private:
+ int _code;
+ string _entireResponse;
+ friend class HttpClient;
+ };
+
+ /**
+ * @return response code
+ */
+ int get( string url , Result * result = 0 );
+
+ /**
+ * @return response code
+ */
+ int post( string url , string body , Result * result = 0 );
+
+ private:
+ int _go( const char * command , string url , const char * body , Result * result );
+
};
}
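// Illustrative sketch, not part of the patch: driving the reworked HttpClient
// API. get()/post() now return the HTTP status code (or -1 on connect
// failure) and optionally fill a Result; the URL here is made up.
#include <iostream>
#include "util/httpclient.h"
static void httpDemo() {
    mongo::HttpClient c;
    mongo::HttpClient::Result r;
    int rc = c.get( "http://localhost:28017/_status" , &r );
    if ( rc == 200 )
        std::cout << r.getEntireResponse() << std::endl;
    else
        std::cout << "http get failed, rc: " << rc << std::endl;
}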
diff --git a/util/log.h b/util/log.h
index a9f43c8..668557a 100644
--- a/util/log.h
+++ b/util/log.h
@@ -18,6 +18,7 @@
#pragma once
#include <string.h>
+#include <errno.h>
namespace mongo {
@@ -117,7 +118,7 @@ namespace mongo {
#define LOGIT { ss << x; return *this; }
class Logstream : public Nullstream {
- static boost::mutex &mutex;
+ static mongo::mutex mutex;
static int doneSetup;
stringstream ss;
public:
@@ -127,7 +128,7 @@ namespace mongo {
void flush() {
// this ensures things are sane
if ( doneSetup == 1717 ){
- boostlock lk(mutex);
+ scoped_lock lk(mutex);
cout << ss.str();
cout.flush();
}
@@ -244,4 +245,6 @@ namespace mongo {
#define OUTPUT_ERRNOX(x) "errno:" << x << " " << strerror(x)
#define OUTPUT_ERRNO OUTPUT_ERRNOX(errno)
+ string errnostring( const char * prefix = 0 );
+
} // namespace mongo
diff --git a/util/message.cpp b/util/message.cpp
index 0fbc2d2..2c3d006 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -35,9 +35,11 @@ namespace mongo {
#define mmm(x)
#ifdef MSG_NOSIGNAL
- const int portSendFlags = MSG_NOSIGNAL;
+ const int portSendFlags = MSG_NOSIGNAL;
+ const int portRecvFlags = MSG_NOSIGNAL;
#else
- const int portSendFlags = 0;
+ const int portSendFlags = 0;
+ const int portRecvFlags = 0;
#endif
/* listener ------------------------------------------------------------------- */
@@ -72,7 +74,7 @@ namespace mongo {
void Listener::listen() {
static long connNumber = 0;
SockAddr from;
- while ( 1 ) {
+ while ( ! inShutdown() ) {
int s = accept(sock, (sockaddr *) &from.sa, &from.addressSize);
if ( s < 0 ) {
if ( errno == ECONNABORTED || errno == EBADF ) {
@@ -117,7 +119,7 @@ namespace mongo {
if ( _buf == _cur )
return 0;
- int x = ::send( _port->sock , _buf , len() , 0 );
+ int x = _port->send( _buf , len() );
_cur = _buf;
return x;
}
@@ -136,23 +138,22 @@ namespace mongo {
class Ports {
set<MessagingPort*>& ports;
- boost::mutex& m;
+ mongo::mutex m;
public:
// we "new" this so it is still be around when other automatic global vars
// are being destructed during termination.
- Ports() : ports( *(new set<MessagingPort*>()) ),
- m( *(new boost::mutex()) ) { }
+ Ports() : ports( *(new set<MessagingPort*>()) ) {}
void closeAll() {
- boostlock bl(m);
+ scoped_lock bl(m);
for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
(*i)->shutdown();
}
void insert(MessagingPort* p) {
- boostlock bl(m);
+ scoped_lock bl(m);
ports.insert(p);
}
void erase(MessagingPort* p) {
- boostlock bl(m);
+ scoped_lock bl(m);
ports.erase(p);
}
} ports;
@@ -263,7 +264,7 @@ again:
char *lenbuf = (char *) &len;
int lft = 4;
while ( 1 ) {
- int x = ::recv(sock, lenbuf, lft, 0);
+ int x = recv( lenbuf, lft );
if ( x == 0 ) {
DEV out() << "MessagingPort recv() conn closed? " << farEnd.toString() << endl;
m.reset();
@@ -286,7 +287,7 @@ again:
if ( len == -1 ) {
// Endian check from the database, after connecting, to see what mode server is running in.
unsigned foo = 0x10203040;
- int x = ::send(sock, (char *) &foo, 4, portSendFlags );
+ int x = send( (char *) &foo, 4 );
if ( x <= 0 ) {
log() << "MessagingPort endian send() " << OUTPUT_ERRNO << ' ' << farEnd.toString() << endl;
return false;
@@ -301,7 +302,7 @@ again:
stringstream ss;
ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
string s = ss.str();
- ::send( sock , s.c_str(), s.size(), 0 );
+ send( s.c_str(), s.size() );
return false;
}
log() << "bad recv() len: " << len << '\n';
@@ -321,7 +322,7 @@ again:
char *p = (char *) &md->id;
int left = len -4;
while ( 1 ) {
- int x = ::recv(sock, p, left, 0);
+ int x = recv( p, left );
if ( x == 0 ) {
DEV out() << "MessagingPort::recv(): conn closed? " << farEnd.toString() << endl;
m.reset();
@@ -376,6 +377,7 @@ again:
}
void MessagingPort::say(Message& toSend, int responseTo) {
+ assert( toSend.data );
mmm( out() << "* say() sock:" << this->sock << " thr:" << GetCurrentThreadId() << endl; )
toSend.data->id = nextMessageId();
toSend.data->responseTo = responseTo;
@@ -395,7 +397,7 @@ again:
}
if ( x == -100 )
- x = ::send(sock, (char*)toSend.data, toSend.data->len , portSendFlags );
+ x = send( (char*)toSend.data, toSend.data->len );
if ( x <= 0 ) {
log() << "MessagingPort say send() " << OUTPUT_ERRNO << ' ' << farEnd.toString() << endl;
@@ -404,6 +406,14 @@ again:
}
+ int MessagingPort::send( const char * data , const int len ){
+ return ::send( sock , data , len , portSendFlags );
+ }
+
+ int MessagingPort::recv( char * buf , int max ){
+ return ::recv( sock , buf , max , portRecvFlags );
+ }
+
void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
if ( toSend.data->len > 1300 ) {
@@ -438,7 +448,7 @@ again:
} msgstart;
MSGID nextMessageId(){
- MSGID msgid = NextMsgId.atomicIncrement();
+ MSGID msgid = NextMsgId++;
if ( usingClientIds ){
msgid = msgid & 0xFFFF;
diff --git a/util/message.h b/util/message.h
index 8d6a46e..5dccaef 100644
--- a/util/message.h
+++ b/util/message.h
@@ -18,13 +18,14 @@
#pragma once
#include "../util/sock.h"
+#include "../util/atomic_int.h"
namespace mongo {
class Message;
class MessagingPort;
class PiggyBackData;
- typedef WrappingInt MSGID;
+ typedef AtomicUInt MSGID;
class Listener {
public:
@@ -73,6 +74,9 @@ namespace mongo {
void piggyBack( Message& toSend , int responseTo = -1 );
virtual unsigned remotePort();
+
+ int send( const char * data , const int len );
+ int recv( char * data , int max );
private:
int sock;
PiggyBackData * piggyBackData;
@@ -99,6 +103,24 @@ namespace mongo {
bool doesOpGetAResponse( int op );
+ inline const char * opToString( int op ){
+ switch ( op ){
+ case 0: return "none";
+ case opReply: return "reply";
+ case dbMsg: return "msg";
+ case dbUpdate: return "update";
+ case dbInsert: return "insert";
+ case dbQuery: return "query";
+ case dbGetMore: return "getmore";
+ case dbDelete: return "remove";
+ case dbKillCursors: return "killcursors";
+ default:
+ PRINT(op);
+ assert(0);
+ return "";
+ }
+ }
+
struct MsgData {
int len; /* len of the msg, including this field */
MSGID id; /* request/reply id's match... */
@@ -146,10 +168,14 @@ namespace mongo {
~Message() {
reset();
}
-
+
SockAddr from;
MsgData *data;
+ int operation() const {
+ return data->operation();
+ }
+
Message& operator=(Message& r) {
assert( data == 0 );
data = r.data;
@@ -175,9 +201,9 @@ namespace mongo {
void setData(int operation, const char *msgtxt) {
setData(operation, msgtxt, strlen(msgtxt)+1);
}
- void setData(int operation, const char *msgdata, int len) {
+ void setData(int operation, const char *msgdata, size_t len) {
assert(data == 0);
- int dataLen = len + sizeof(MsgData) - 4;
+ size_t dataLen = len + sizeof(MsgData) - 4;
MsgData *d = (MsgData *) malloc(dataLen);
memcpy(d->_data, msgdata, len);
d->len = fixEndian(dataLen);
diff --git a/util/message_server_asio.cpp b/util/message_server_asio.cpp
index 4d5fab0..7fca29a 100644
--- a/util/message_server_asio.cpp
+++ b/util/message_server_asio.cpp
@@ -27,23 +27,58 @@
#include "message.h"
#include "message_server.h"
-#include "../util/thread_pool.h"
+#include "../util/mvar.h"
using namespace boost;
using namespace boost::asio;
using namespace boost::asio::ip;
-//using namespace std;
namespace mongo {
+ class MessageServerSession;
+
namespace {
- ThreadPool tp;
+ class StickyThread{
+ public:
+ StickyThread()
+ : _thread(boost::ref(*this))
+ {}
+
+ ~StickyThread(){
+ _mss.put(boost::shared_ptr<MessageServerSession>());
+ _thread.join();
+ }
+
+ void ready(boost::shared_ptr<MessageServerSession> mss){
+ _mss.put(mss);
+ }
+
+ void operator() (){
+ boost::shared_ptr<MessageServerSession> mss;
+ while((mss = _mss.take())){ // intentionally not using ==
+ task(mss.get());
+ mss.reset();
+ }
+ }
+
+ private:
+ inline void task(MessageServerSession* mss); // must be defined after MessageServerSession
+
+ MVar<boost::shared_ptr<MessageServerSession> > _mss; // populated when given a task
+ boost::thread _thread; // declared after _mss so the thread cannot start before _mss exists
+ };
+
+ vector<boost::shared_ptr<StickyThread> > thread_pool;
+ mongo::mutex tp_mutex; // this is only needed if io_service::run() is called from multiple threads
}
class MessageServerSession : public boost::enable_shared_from_this<MessageServerSession> , public AbstractMessagingPort {
public:
- MessageServerSession( MessageHandler * handler , io_service& ioservice ) : _handler( handler ) , _socket( ioservice ){
-
- }
+ MessageServerSession( MessageHandler * handler , io_service& ioservice )
+ : _handler( handler )
+ , _socket( ioservice )
+ , _portCache(0)
+ { }
+
~MessageServerSession(){
cout << "disconnect from: " << _socket.remote_endpoint() << endl;
}
@@ -81,7 +116,20 @@ namespace mongo {
}
void handleReadBody( const boost::system::error_code& error ){
- tp.schedule(&MessageServerSession::process, shared_from_this());
+ if (!_myThread){
+ mongo::mutex::scoped_lock lk(tp_mutex); // named, so the lock is actually held
+ if (!thread_pool.empty()){
+ _myThread = thread_pool.back();
+ thread_pool.pop_back();
+ }
+ }
+
+ if (!_myThread) // pool is empty
+ _myThread.reset(new StickyThread());
+
+ assert(_myThread);
+
+ _myThread->ready(shared_from_this());
}
void process(){
@@ -98,6 +146,13 @@ namespace mongo {
}
void handleWriteDone( const boost::system::error_code& error ){
+ {
+ // return thread to pool after we have sent data to the client
+ mongo::mutex::scoped_lock lk(tp_mutex); // named, so the lock is actually held
+ assert(_myThread);
+ thread_pool.push_back(_myThread);
+ _myThread.reset();
+ }
_cur.reset();
_reply.reset();
_startHeaderRead();
@@ -117,7 +172,9 @@ namespace mongo {
virtual unsigned remotePort(){
- return _socket.remote_endpoint().port();
+ if (!_portCache)
+ _portCache = _socket.remote_endpoint().port(); //this is expensive
+ return _portCache;
}
private:
@@ -134,7 +191,15 @@ namespace mongo {
MsgData _inHeader;
Message _cur;
Message _reply;
+
+ unsigned _portCache;
+
+ boost::shared_ptr<StickyThread> _myThread;
};
+
+ void StickyThread::task(MessageServerSession* mss){
+ mss->process();
+ }
class AsyncMessageServer : public MessageServer {
@@ -152,6 +217,7 @@ namespace mongo {
void run(){
cout << "AsyncMessageServer starting to listen on: " << _port << endl;
+ boost::thread other(boost::bind(&io_service::run, &_ioservice));
_ioservice.run();
cout << "AsyncMessageServer done listening on: " << _port << endl;
}
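// Illustrative sketch, not part of the patch: the MVar<T> that StickyThread
// relies on (util/mvar.h, not shown here) is a one-slot synchronized box --
// put() blocks while the slot is full, take() blocks while it is empty.
// A minimal stand-in conveying the semantics:
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
template <typename T>
class MiniMVar {
public:
    MiniMVar() : _full(false) {}
    void put( const T& t ) {
        boost::mutex::scoped_lock lk(_m);
        while ( _full ) _cond.wait(lk);
        _val = t; _full = true;
        _cond.notify_all();
    }
    T take() {
        boost::mutex::scoped_lock lk(_m);
        while ( ! _full ) _cond.wait(lk);
        _full = false;
        _cond.notify_all();
        return _val;
    }
private:
    boost::mutex _m;
    boost::condition _cond;
    bool _full;
    T _val;
};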
diff --git a/util/message_server_port.cpp b/util/message_server_port.cpp
index e5becc9..fa8f9e5 100644
--- a/util/message_server_port.cpp
+++ b/util/message_server_port.cpp
@@ -15,6 +15,8 @@
* limitations under the License.
*/
+#include "stdafx.h"
+
#ifndef USE_ASIO
#include "message.h"
diff --git a/util/miniwebserver.cpp b/util/miniwebserver.cpp
index b492153..61619d8 100644
--- a/util/miniwebserver.cpp
+++ b/util/miniwebserver.cpp
@@ -17,6 +17,7 @@
#include "stdafx.h"
#include "miniwebserver.h"
+#include "hex.h"
#include <pcrecpp.h>
@@ -81,12 +82,13 @@ namespace mongo {
return string( urlStart , (int)(end-urlStart) );
}
- void MiniWebServer::parseParams( map<string,string> & params , string query ) {
+ void MiniWebServer::parseParams( BSONObj & params , string query ) {
if ( query.size() == 0 )
return;
-
+
+ BSONObjBuilder b;
while ( query.size() ) {
-
+
string::size_type amp = query.find( "&" );
string cur;
@@ -103,9 +105,10 @@ namespace mongo {
if ( eq == string::npos )
continue;
- params[cur.substr(0,eq)] = cur.substr(eq+1);
+ b.append( urlDecode(cur.substr(0,eq)).c_str() , urlDecode(cur.substr(eq+1) ) );
}
- return;
+
+ params = b.obj();
}
string MiniWebServer::parseMethod( const char * headers ) {
@@ -203,7 +206,7 @@ namespace mongo {
void MiniWebServer::run() {
SockAddr from;
- while ( 1 ) {
+ while ( ! inShutdown() ) {
int s = accept(sock, (sockaddr *) &from.sa, &from.addressSize);
if ( s < 0 ) {
if ( errno == ECONNABORTED ) {
@@ -221,4 +224,20 @@ namespace mongo {
}
}
+ string MiniWebServer::urlDecode(const char* s){
+ stringstream out;
+ while(*s){
+ if (*s == '+'){
+ out << ' ';
+ }else if (*s == '%' && s[1] && s[2]){ // guard against a truncated escape at end of string
+ out << fromHex(s+1);
+ s+=2;
+ }else{
+ out << *s;
+ }
+ s++;
+ }
+ return out.str();
+ }
+
} // namespace mongo
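// Illustrative sketch, not part of the patch: expected urlDecode behavior --
// '+' becomes a space and %XX pairs are hex-decoded via fromHex. Done from a
// (hypothetical) subclass in case the member is protected in the full header.
#include <cassert>
#include "util/miniwebserver.h"
struct UrlDecodeDemo : public mongo::MiniWebServer {
    static void run() {
        assert( urlDecode( "a+b" )    == "a b" );
        assert( urlDecode( "a%20b" )  == "a b" );
        assert( urlDecode( "100%25" ) == "100%" );
    }
};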
diff --git a/util/miniwebserver.h b/util/miniwebserver.h
index 27476d6..bdd2873 100644
--- a/util/miniwebserver.h
+++ b/util/miniwebserver.h
@@ -17,7 +17,9 @@
#pragma once
+#include "../stdafx.h"
#include "message.h"
+#include "../db/jsobj.h"
namespace mongo {
@@ -45,9 +47,12 @@ namespace mongo {
string parseURL( const char * buf );
string parseMethod( const char * headers );
string getHeader( const char * headers , string name );
- void parseParams( map<string,string> & params , string query );
+ void parseParams( BSONObj & params , string query );
static const char *body( const char *buf );
+ static string urlDecode(const char* s);
+ static string urlDecode(string s) {return urlDecode(s.c_str());}
+
private:
void accepted(int s, const SockAddr &from);
static bool fullReceive( const char *buf );
diff --git a/util/mmap.cpp b/util/mmap.cpp
index f3103d0..f6bbc73 100644
--- a/util/mmap.cpp
+++ b/util/mmap.cpp
@@ -17,20 +17,21 @@
#include "stdafx.h"
#include "mmap.h"
+#include "processinfo.h"
namespace mongo {
set<MemoryMappedFile*> mmfiles;
- boost::mutex mmmutex;
+ mongo::mutex mmmutex;
MemoryMappedFile::~MemoryMappedFile() {
close();
- boostlock lk( mmmutex );
+ scoped_lock lk( mmmutex );
mmfiles.erase(this);
}
void MemoryMappedFile::created(){
- boostlock lk( mmmutex );
+ scoped_lock lk( mmmutex );
mmfiles.insert(this);
}
@@ -54,7 +55,7 @@ namespace mongo {
long long MemoryMappedFile::totalMappedLength(){
unsigned long long total = 0;
- boostlock lk( mmmutex );
+ scoped_lock lk( mmmutex );
for ( set<MemoryMappedFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
total += (*i)->length();
@@ -64,7 +65,7 @@ namespace mongo {
int MemoryMappedFile::flushAll( bool sync ){
int num = 0;
- boostlock lk( mmmutex );
+ scoped_lock lk( mmmutex );
for ( set<MemoryMappedFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ){
num++;
MemoryMappedFile * mmf = *i;
@@ -92,4 +93,18 @@ namespace mongo {
return map( filename , i );
}
+ void printMemInfo( const char * where ){
+ cout << "mem info: ";
+ if ( where )
+ cout << where << " ";
+ ProcessInfo pi;
+ if ( ! pi.supported() ){
+ cout << " not supported" << endl;
+ return;
+ }
+
+ cout << "vsize: " << pi.getVirtualMemorySize() << " resident: " << pi.getResidentSize() << " mapped: " << ( MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) ) << endl;
+ }
+
+
} // namespace mongo
diff --git a/util/mmap.h b/util/mmap.h
index ed4ca99..947364b 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -22,6 +22,10 @@ namespace mongo {
class MemoryMappedFile {
public:
+ enum Options {
+ SEQUENTIAL = 1
+ };
+
MemoryMappedFile();
~MemoryMappedFile(); /* closes the file if open */
void close();
@@ -32,7 +36,7 @@ namespace mongo {
/* Creates with length if DNE, otherwise uses existing file length,
passed length.
*/
- void* map(const char *filename, long &length);
+ void* map(const char *filename, long &length, int options = 0 );
void flush(bool sync);
@@ -58,6 +62,7 @@ namespace mongo {
void *view;
long len;
};
-
+
+ void printMemInfo( const char * where );
} // namespace mongo
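// Illustrative sketch, not part of the patch: passing the new SEQUENTIAL
// option when mapping a file that will be scanned front to back; the path is
// made up. On posix this becomes madvise(MADV_SEQUENTIAL), on windows
// FILE_FLAG_SEQUENTIAL_SCAN (see the platform files below).
#include "util/mmap.h"
static void mapForScan() {
    mongo::MemoryMappedFile f;
    long len = 64 * 1024 * 1024;
    void * view = f.map( "/data/db/scan.dat" , len , mongo::MemoryMappedFile::SEQUENTIAL );
    (void)view; // ... one sequential pass over the mapping ...
}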
diff --git a/util/mmap_mm.cpp b/util/mmap_mm.cpp
index aa9b275..9cffad5 100644
--- a/util/mmap_mm.cpp
+++ b/util/mmap_mm.cpp
@@ -31,14 +31,13 @@ namespace mongo {
void MemoryMappedFile::close() {
if ( view )
- delete( view );
+ free( view );
view = 0;
len = 0;
}
- void* MemoryMappedFile::map(const char *filename, size_t length) {
- path p( filename );
-
+ void* MemoryMappedFile::map(const char *filename, long& length , int options ) {
+ assert( length );
view = malloc( length );
return view;
}
diff --git a/util/mmap_posix.cpp b/util/mmap_posix.cpp
index 1237220..836373d 100644
--- a/util/mmap_posix.cpp
+++ b/util/mmap_posix.cpp
@@ -49,7 +49,7 @@ namespace mongo {
#define O_NOATIME 0
#endif
- void* MemoryMappedFile::map(const char *filename, long &length) {
+ void* MemoryMappedFile::map(const char *filename, long &length, int options) {
// length may be updated by callee.
theFileAllocator().allocateAsap( filename, length );
len = length;
@@ -79,9 +79,19 @@ namespace mongo {
}
return 0;
}
+
+#if defined(__sunos__)
+#warning madvise not supported on solaris yet
+#else
+ if ( options & SEQUENTIAL ){
+ if ( madvise( view , length , MADV_SEQUENTIAL ) ){
+ out() << " madvise failed for " << filename << " " << OUTPUT_ERRNO << endl;
+ }
+ }
+#endif
return view;
}
-
+
void MemoryMappedFile::flush(bool sync) {
if ( view == 0 || fd == 0 )
return;
diff --git a/util/mmap_win.cpp b/util/mmap_win.cpp
index 8a0d306..d831d66 100644
--- a/util/mmap_win.cpp
+++ b/util/mmap_win.cpp
@@ -49,7 +49,7 @@ namespace mongo {
unsigned mapped = 0;
- void* MemoryMappedFile::map(const char *_filename, long &length) {
+ void* MemoryMappedFile::map(const char *_filename, long &length, int options) {
/* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
char filename[256];
strncpy(filename, _filename, 255);
@@ -69,9 +69,13 @@ namespace mongo {
updateLength( filename, length );
std::wstring filenamew = toWideString(filename);
+ DWORD createOptions = FILE_ATTRIBUTE_NORMAL;
+ if ( options & SEQUENTIAL )
+ createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;
+
fd = CreateFile(
filenamew.c_str(), GENERIC_WRITE | GENERIC_READ, FILE_SHARE_READ,
- NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ NULL, OPEN_ALWAYS, createOptions , NULL);
if ( fd == INVALID_HANDLE_VALUE ) {
out() << "Create/OpenFile failed " << filename << ' ' << GetLastError() << endl;
return 0;
@@ -95,7 +99,21 @@ namespace mongo {
return view;
}
- void MemoryMappedFile::flush(bool) {
- }
+ void MemoryMappedFile::flush(bool sync) {
+ uassert(13056, "Async flushing not supported on windows", sync);
+ if (!view || !fd) return;
+
+ bool success = FlushViewOfFile(view, 0); // 0 means whole mapping
+ if (!success){
+ int err = GetLastError();
+ out() << "FlushViewOfFile failed " << err << endl;
+ }
+
+ success = FlushFileBuffers(fd);
+ if (!success){
+ int err = GetLastError();
+ out() << "FlushFileBuffers failed " << err << endl;
+ }
+ }
}
diff --git a/util/optime.h b/util/optime.h
index b7d4f61..8b26434 100644
--- a/util/optime.h
+++ b/util/optime.h
@@ -20,15 +20,24 @@
#include "../db/concurrency.h"
namespace mongo {
+ void exitCleanly( int code );
/* Operation sequence #. A combination of current second plus an ordinal value.
*/
+ struct ClockSkewException : public DBException {
+ virtual const char* what() const throw() { return "clock skew exception"; }
+ virtual int getCode(){ return 20001; }
+ };
+
#pragma pack(4)
class OpTime {
unsigned i;
unsigned secs;
static OpTime last;
public:
+ static void setLast(const Date_t &date) {
+ last = OpTime(date);
+ }
unsigned getSecs() const {
return secs;
}
@@ -49,6 +58,20 @@ namespace mongo {
static OpTime now() {
unsigned t = (unsigned) time(0);
// DEV assertInWriteLock();
+ if ( t < last.secs ){
+ bool toLog = false;
+ ONCE toLog = true;
+ RARELY toLog = true;
+ if ( last.i & 0x80000000 )
+ toLog = true;
+ if ( toLog )
+ log() << "clock skew detected prev: " << last.secs << " now: " << t << " trying to handle..." << endl;
+ if ( last.i & 0x80000000 ) {
+ log() << "ERROR Large clock skew detected, shutting down" << endl;
+ throw ClockSkewException();
+ }
+ t = last.secs;
+ }
if ( last.secs == t ) {
last.i++;
return last;
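// Illustrative sketch, not part of the patch: OpTime orders as the pair
// (secs, i) -- seconds first, ordinal second -- which is why freezing secs
// and bumping i above keeps optimes monotonic under small backward skew.
// The packing below is hypothetical, purely to show the ordering.
#include <cassert>
static unsigned long long packOpTime( unsigned secs , unsigned i ) {
    return ( (unsigned long long)secs << 32 ) | i; // hypothetical: secs in the high bits
}
static void opTimeOrderDemo() {
    assert( packOpTime( 100 , 2 ) > packOpTime( 100 , 1 ) ); // same second: ordinal decides
    assert( packOpTime( 101 , 0 ) > packOpTime( 100 , 9 ) ); // later second always wins
}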
diff --git a/util/processinfo.h b/util/processinfo.h
index 83c3bcf..b7bc90d 100644
--- a/util/processinfo.h
+++ b/util/processinfo.h
@@ -52,6 +52,9 @@ namespace mongo {
bool supported();
+ bool blockCheckSupported();
+ bool blockInMemory( char * start );
+
private:
pid_t _pid;
};
diff --git a/util/processinfo_darwin.cpp b/util/processinfo_darwin.cpp
index 904f967..206c270 100644
--- a/util/processinfo_darwin.cpp
+++ b/util/processinfo_darwin.cpp
@@ -15,8 +15,9 @@
* limitations under the License.
*/
+#include "../stdafx.h"
#include "processinfo.h"
-
+#include "log.h"
#include <mach/task_info.h>
@@ -29,6 +30,9 @@
#include <mach/shared_memory_server.h>
#include <iostream>
+#include <sys/types.h>
+#include <sys/mman.h>
+
using namespace std;
namespace mongo {
@@ -63,7 +67,7 @@ namespace mongo {
cout << "error getting task_info: " << result << endl;
return 0;
}
- return (int)((double)ti.virtual_size / (1024.0 * 1024 * 2 ) );
+ return (int)((double)ti.virtual_size / (1024.0 * 1024 ) );
}
int ProcessInfo::getResidentSize(){
@@ -92,4 +96,22 @@ namespace mongo {
void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
+ bool ProcessInfo::blockCheckSupported(){
+ return true;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ){
+ static long pageSize = 0;
+ if ( pageSize == 0 ){
+ pageSize = sysconf( _SC_PAGESIZE );
+ }
+ start = start - ( (unsigned long long)start % pageSize );
+ char x = 0;
+ if ( mincore( start , 128 , &x ) ){
+ log() << "mincore failed: " << OUTPUT_ERRNO << endl;
+ return 1;
+ }
+ return x & 0x1;
+ }
+
}
diff --git a/util/processinfo_linux2.cpp b/util/processinfo_linux2.cpp
index 3e00c06..eaaee09 100644
--- a/util/processinfo_linux2.cpp
+++ b/util/processinfo_linux2.cpp
@@ -21,6 +21,8 @@
#include <stdio.h>
#include <malloc.h>
#include <db/jsobj.h>
+#include <unistd.h>
+#include <sys/mman.h>
using namespace std;
@@ -212,4 +214,23 @@ namespace mongo {
info.append("page_faults", (int)p._maj_flt);
}
+ bool ProcessInfo::blockCheckSupported(){
+ return true;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ){
+ static long pageSize = 0;
+ if ( pageSize == 0 ){
+ pageSize = sysconf( _SC_PAGESIZE );
+ }
+ start = start - ( (unsigned long long)start % pageSize );
+ unsigned char x = 0;
+ if ( mincore( start , 128 , &x ) ){
+ log() << "mincore failed: " << OUTPUT_ERRNO << endl;
+ return 1;
+ }
+ return x & 0x1;
+ }
+
+
}
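// Illustrative sketch, not part of the patch: probing residency of a mapped
// page before touching it, via the new blockInMemory; `p` is a made-up
// pointer into some memory-mapped region, and ProcessInfo is assumed to be
// default-constructible for the current process.
#include "util/processinfo.h"
static bool pageResident( char * p ) {
    mongo::ProcessInfo pi;
    if ( ! pi.blockCheckSupported() )
        return true; // optimistic default where mincore isn't available
    return pi.blockInMemory( p );
}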
diff --git a/util/processinfo_none.cpp b/util/processinfo_none.cpp
index 57f4ca3..9af1766 100644
--- a/util/processinfo_none.cpp
+++ b/util/processinfo_none.cpp
@@ -42,5 +42,14 @@ namespace mongo {
}
void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
+
+ bool ProcessInfo::blockCheckSupported(){
+ return false;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ){
+ assert(0);
+ return true;
+ }
}
diff --git a/util/processinfo_win32.cpp b/util/processinfo_win32.cpp
index 0f0bf2e..0705fcb 100644
--- a/util/processinfo_win32.cpp
+++ b/util/processinfo_win32.cpp
@@ -61,4 +61,14 @@ namespace mongo {
}
void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
+
+ bool ProcessInfo::blockCheckSupported(){
+ return false;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ){
+ assert(0);
+ return true;
+ }
+
}
diff --git a/util/queue.h b/util/queue.h
index 8f4fbaf..d48e012 100644
--- a/util/queue.h
+++ b/util/queue.h
@@ -30,18 +30,18 @@ namespace mongo {
template<typename T> class BlockingQueue : boost::noncopyable {
public:
void push(T const& t){
- boostlock l( _lock );
+ scoped_lock l( _lock );
_queue.push( t );
_condition.notify_one();
}
bool empty() const {
- boostlock l( _lock );
+ scoped_lock l( _lock );
return _queue.empty();
}
bool tryPop( T & t ){
- boostlock l( _lock );
+ scoped_lock l( _lock );
if ( _queue.empty() )
return false;
@@ -53,9 +53,9 @@ namespace mongo {
T blockingPop(){
- boostlock l( _lock );
+ scoped_lock l( _lock );
while( _queue.empty() )
- _condition.wait( l );
+ _condition.wait( l.boost() );
T t = _queue.front();
_queue.pop();
@@ -65,7 +65,7 @@ namespace mongo {
private:
std::queue<T> _queue;
- mutable boost::mutex _lock;
+ mutable mongo::mutex _lock;
boost::condition _condition;
};
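// Illustrative sketch, not part of the patch: BlockingQueue after the
// mongo::mutex conversion behaves as before -- one thread push()es while
// another blocks in blockingPop(); tryPop() is the non-blocking variant.
#include <cassert>
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>
#include "util/queue.h"
static void pushOne( mongo::BlockingQueue<int> * q ) { q->push( 42 ); }
static void queueDemo() {
    mongo::BlockingQueue<int> q;
    boost::thread producer( boost::bind( pushOne , &q ) );
    int x = q.blockingPop(); // blocks until pushOne's push lands
    producer.join();
    assert( x == 42 );
}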
diff --git a/util/sock.cpp b/util/sock.cpp
index 5172692..5beac68 100644
--- a/util/sock.cpp
+++ b/util/sock.cpp
@@ -20,14 +20,14 @@
namespace mongo {
- static boost::mutex sock_mutex;
+ static mongo::mutex sock_mutex;
string hostbyname(const char *hostname) {
static string unknown = "0.0.0.0";
if ( unknown == hostname )
return unknown;
- boostlock lk(sock_mutex);
+ scoped_lock lk(sock_mutex);
#if defined(_WIN32)
if( inet_addr(hostname) != INADDR_NONE )
return hostname;
diff --git a/util/sock.h b/util/sock.h
index 5798a71..ee7a7ae 100644
--- a/util/sock.h
+++ b/util/sock.h
@@ -245,25 +245,25 @@ namespace mongo {
}
void add( int sock ){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
_sockets->insert( sock );
}
void remove( int sock ){
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
_sockets->erase( sock );
}
void closeAll(){
set<int>* s;
{
- boostlock lk( _mutex );
+ scoped_lock lk( _mutex );
s = _sockets;
_sockets = new set<int>();
}
for ( set<int>::iterator i=s->begin(); i!=s->end(); i++ ){
int sock = *i;
- log() << "going to close listening socket: " << sock << endl;
+ log() << "\t going to close listening socket: " << sock << endl;
closesocket( sock );
}
@@ -272,7 +272,7 @@ namespace mongo {
static ListeningSockets* get();
private:
- boost::mutex _mutex;
+ mongo::mutex _mutex;
set<int>* _sockets;
static ListeningSockets* _instance;
};
diff --git a/util/thread_pool.cpp b/util/thread_pool.cpp
index b95bc1d..77d0d05 100644
--- a/util/thread_pool.cpp
+++ b/util/thread_pool.cpp
@@ -77,7 +77,7 @@ ThreadPool::ThreadPool(int nThreads)
: _tasksRemaining(0)
, _nThreads(nThreads)
{
- boostlock lock(_mutex);
+ scoped_lock lock(_mutex);
while (nThreads-- > 0){
Worker* worker = new Worker(*this);
_freeWorkers.push_front(worker);
@@ -99,14 +99,14 @@ ThreadPool::~ThreadPool(){
}
void ThreadPool::join(){
- boostlock lock(_mutex);
+ scoped_lock lock(_mutex);
while(_tasksRemaining){
- _condition.wait(lock);
+ _condition.wait(lock.boost());
}
}
void ThreadPool::schedule(Task task){
- boostlock lock(_mutex);
+ scoped_lock lock(_mutex);
_tasksRemaining++;
@@ -120,7 +120,7 @@ void ThreadPool::schedule(Task task){
// should only be called by a worker from the worker thread
void ThreadPool::task_done(Worker* worker){
- boostlock lock(_mutex);
+ scoped_lock lock(_mutex);
if (!_tasks.empty()){
worker->set_task(_tasks.front());
diff --git a/util/thread_pool.h b/util/thread_pool.h
index 91c2969..d891d7d 100644
--- a/util/thread_pool.h
+++ b/util/thread_pool.h
@@ -62,7 +62,7 @@ namespace threadpool {
int tasks_remaining() { return _tasksRemaining; }
private:
- boost::mutex _mutex;
+ mongo::mutex _mutex;
boost::condition _condition;
list<Worker*> _freeWorkers; //used as LIFO stack (always front)
diff --git a/util/top.cpp b/util/top.cpp
deleted file mode 100644
index 98d9598..0000000
--- a/util/top.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// top.cpp
-
-#include "stdafx.h"
-#include "top.h"
-
-namespace mongo {
-
- Top::T Top::_snapshotStart = Top::currentTime();
- Top::D Top::_snapshotDuration;
- Top::UsageMap Top::_totalUsage;
- Top::UsageMap Top::_snapshotA;
- Top::UsageMap Top::_snapshotB;
- Top::UsageMap &Top::_snapshot = Top::_snapshotA;
- Top::UsageMap &Top::_nextSnapshot = Top::_snapshotB;
- boost::mutex Top::topMutex;
-
-
-}
diff --git a/util/util.cpp b/util/util.cpp
index 78d8d52..8ae00f3 100644
--- a/util/util.cpp
+++ b/util/util.cpp
@@ -18,7 +18,6 @@
#include "stdafx.h"
#include "goodies.h"
#include "unittest.h"
-#include "top.h"
#include "file_allocator.h"
#include "optime.h"
@@ -35,7 +34,7 @@ namespace mongo {
const char * (*getcurns)() = default_getcurns;
int logLevel = 0;
- boost::mutex &Logstream::mutex = *( new boost::mutex );
+ mongo::mutex Logstream::mutex;
int Logstream::doneSetup = Logstream::magicNumber();
bool goingAway = false;
@@ -113,9 +112,9 @@ namespace mongo {
#if defined(_WIN32)
(std::cout << now << " " << s).flush();
#else
- assert( write( STDOUT_FILENO, now, 20 ) > 0 );
- assert( write( STDOUT_FILENO, " ", 1 ) > 0 );
- assert( write( STDOUT_FILENO, s.c_str(), s.length() ) > 0 );
+ write( STDOUT_FILENO, now, 20 );
+ write( STDOUT_FILENO, " ", 1 );
+ write( STDOUT_FILENO, s.c_str(), s.length() );
fsync( STDOUT_FILENO );
#endif
}
@@ -133,5 +132,12 @@ namespace mongo {
ss << "db version v" << versionString << ", pdfile version " << VERSION << "." << VERSION_MINOR;
return ss.str();
}
-
+
+ ostream& operator<<( ostream &s, const ThreadSafeString &o ){
+ s << (string)o;
+ return s;
+ }
+
+ bool __destroyingStatics = false;
+
} // namespace mongo