summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAntonin Kral <a.kral@bobek.cz>2010-09-24 19:01:03 +0200
committerAntonin Kral <a.kral@bobek.cz>2010-09-24 19:01:03 +0200
commit0ad0c09511a04ebe837f2acb859d47f2aa4e038a (patch)
tree109babcb556f6c5884b77853120717f0617c7a1e
parent03e58f81cad8dd4cfcd1530f327116f0cff6ceb3 (diff)
downloadmongodb-0ad0c09511a04ebe837f2acb859d47f2aa4e038a.tar.gz
Imported Upstream version 1.6.3
-rw-r--r--SConstruct2
-rw-r--r--bson/bsonelement.h2
-rw-r--r--bson/bsonobj.h2
-rw-r--r--client/dbclient.cpp2
-rw-r--r--client/distlock.cpp2
-rw-r--r--client/parallel.cpp38
-rw-r--r--client/parallel.h7
-rw-r--r--db/client.cpp45
-rw-r--r--db/client.h50
-rw-r--r--db/clientcursor.h9
-rw-r--r--db/cloner.cpp17
-rw-r--r--db/commands.h4
-rw-r--r--db/curop.h5
-rw-r--r--db/db.cpp16
-rw-r--r--db/db.vcxproj20
-rwxr-xr-xdb/db.vcxproj.filters62
-rw-r--r--db/db_10.sln4
-rw-r--r--db/dbcommands.cpp88
-rw-r--r--db/dbcommands_generic.cpp3
-rw-r--r--db/instance.cpp33
-rw-r--r--db/lasterror.cpp7
-rw-r--r--db/matcher.cpp2
-rw-r--r--db/oplog.cpp6
-rw-r--r--db/oplog.h2
-rw-r--r--db/pdfile.cpp48
-rw-r--r--db/pdfile.h6
-rw-r--r--db/query.cpp7
-rw-r--r--db/queryoptimizer.cpp2
-rw-r--r--db/queryutil.cpp30
-rw-r--r--db/repl.cpp22
-rw-r--r--db/repl/consensus.cpp12
-rw-r--r--db/repl/health.cpp34
-rw-r--r--db/repl/health.h2
-rw-r--r--db/repl/heartbeat.cpp27
-rw-r--r--db/repl/manager.cpp30
-rw-r--r--db/repl/replset_commands.cpp18
-rw-r--r--db/repl/rs.cpp138
-rw-r--r--db/repl/rs.h74
-rw-r--r--db/repl/rs_config.cpp57
-rw-r--r--db/repl/rs_config.h13
-rw-r--r--db/repl/rs_initialsync.cpp2
-rw-r--r--db/repl/rs_member.h19
-rw-r--r--db/repl/rs_rollback.cpp291
-rw-r--r--db/repl/rs_sync.cpp122
-rw-r--r--db/security_commands.cpp4
-rw-r--r--db/stats/snapshots.cpp2
-rw-r--r--dbtests/basictests.cpp2
-rw-r--r--dbtests/jstests.cpp2
-rw-r--r--debian/changelog8
-rw-r--r--debian/files1
-rw-r--r--debian/mongodb.upstart15
-rw-r--r--debian/preinst37
-rw-r--r--doxygenConfig2
-rw-r--r--jstests/datasize3.js32
-rw-r--r--jstests/disk/diskfull.js5
-rw-r--r--jstests/evalc.js32
-rw-r--r--jstests/geo_circle1.js2
-rw-r--r--jstests/geo_queryoptimizer.js27
-rw-r--r--jstests/mr_sort.js44
-rw-r--r--jstests/numberlong.js28
-rw-r--r--jstests/remove3.js2
-rw-r--r--jstests/remove_justone.js16
-rw-r--r--jstests/repl/repl10.js12
-rw-r--r--jstests/repl/repl12.js47
-rw-r--r--jstests/repl/replacePeer2.js11
-rw-r--r--jstests/repl/snapshot2.js47
-rw-r--r--jstests/replsets/randomcommands1.js29
-rw-r--r--jstests/replsets/replset1.js3
-rw-r--r--jstests/replsets/replset2.js242
-rw-r--r--jstests/replsets/replset4.js71
-rw-r--r--jstests/replsets/replset5.js72
-rw-r--r--jstests/replsets/replset_remove_node.js17
-rw-r--r--jstests/replsets/rollback.js30
-rw-r--r--jstests/replsets/rollback2.js8
-rwxr-xr-xjstests/replsets/rollback3.js224
-rw-r--r--jstests/replsets/sync1.js353
-rwxr-xr-xjstests/replsets/two_initsync.js93
-rw-r--r--jstests/sharding/features3.js2
-rw-r--r--jstests/sharding/shard3.js20
-rw-r--r--lib/libboost_thread-gcc41-mt-d-1_34_1.abin0 -> 692920 bytes
-rw-r--r--rpm/mongo.spec2
-rw-r--r--s/chunk.cpp21
-rw-r--r--s/chunk.h5
-rw-r--r--s/d_logic.cpp4
-rw-r--r--s/d_logic.h2
-rw-r--r--s/d_migrate.cpp6
-rw-r--r--s/d_split.cpp16
-rw-r--r--s/d_state.cpp11
-rw-r--r--s/s_only.cpp5
-rw-r--r--s/shardconnection.cpp23
-rw-r--r--s/strategy_shard.cpp4
-rw-r--r--s/util.h3
-rw-r--r--scripting/engine.cpp4
-rw-r--r--scripting/engine_spidermonkey.cpp3
-rw-r--r--scripting/sm_db.cpp49
-rw-r--r--scripting/v8_db.cpp22
-rw-r--r--shell/collection.js7
-rw-r--r--shell/mongo_vstudio.cpp3532
-rw-r--r--shell/servers.js26
-rw-r--r--shell/shell_utils.cpp2
-rw-r--r--tools/stat.cpp2
-rw-r--r--util/concurrency/mutex.h2
-rw-r--r--util/concurrency/task.cpp11
-rw-r--r--util/concurrency/vars.cpp4
-rw-r--r--util/goodies.h8
-rw-r--r--util/log.cpp6
-rw-r--r--util/log.h77
-rw-r--r--util/message.cpp41
-rw-r--r--util/message.h34
-rw-r--r--util/mmap.h4
-rwxr-xr-xutil/mongoutils/README12
-rw-r--r--util/optime.h2
-rw-r--r--util/sock.h15
-rw-r--r--util/util.cpp20
-rw-r--r--util/version.cpp29
-rw-r--r--util/version.h2
116 files changed, 4238 insertions, 2702 deletions
diff --git a/SConstruct b/SConstruct
index b759094..e3046ff 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1243,7 +1243,7 @@ elif not onlyServer:
shellEnv["CPPPATH"].remove( "/usr/64/include" )
shellEnv["LIBPATH"].remove( "/usr/64/lib" )
shellEnv.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
- shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib"]) )
+ shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib", "/usr/local/lib" ]) )
l = shellEnv["LIBS"]
diff --git a/bson/bsonelement.h b/bson/bsonelement.h
index 2bbc640..534c773 100644
--- a/bson/bsonelement.h
+++ b/bson/bsonelement.h
@@ -511,7 +511,7 @@ private:
}
}
- /** Retrieve int value for the element safely. Zero returned if not a number. */
+ /** Retrieve int value for the element safely. Zero returned if not a number. Converted to int if another numeric type. */
inline int BSONElement::numberInt() const {
switch( type() ) {
case NumberDouble:
diff --git a/bson/bsonobj.h b/bson/bsonobj.h
index 0e99f28..a802526 100644
--- a/bson/bsonobj.h
+++ b/bson/bsonobj.h
@@ -40,7 +40,7 @@ namespace mongo {
BSON object format:
- \code
+ code
<unsigned totalSize> {<byte BSONType><cstring FieldName><Data>}* EOO
totalSize includes itself.
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 26b1c26..aa9b7ae 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -502,7 +502,7 @@ namespace mongo {
if ( !p->connect(*server) ) {
stringstream ss;
- ss << "couldn't connect to server " << _serverString << '}';
+ ss << "couldn't connect to server " << _serverString;
errmsg = ss.str();
failed = true;
return false;
diff --git a/client/distlock.cpp b/client/distlock.cpp
index 245eb7e..05e54c0 100644
--- a/client/distlock.cpp
+++ b/client/distlock.cpp
@@ -47,6 +47,8 @@ namespace mongo {
}
void distLockPingThread( ConnectionString addr ){
+ setThreadName( "LockPinger" );
+
static int loops = 0;
while( ! inShutdown() ){
try {
diff --git a/client/parallel.cpp b/client/parallel.cpp
index eeadb89..92d1b04 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -452,37 +452,31 @@ namespace mongo {
}
bool Future::CommandResult::join(){
- while ( ! _done )
- sleepmicros( 50 );
+ _thr->join();
+ assert( _done );
return _ok;
}
- void Future::commandThread(){
- assert( _grab );
- shared_ptr<CommandResult> res = *_grab;
- _grab = 0;
-
- ScopedDbConnection conn( res->_server );
- res->_ok = conn->runCommand( res->_db , res->_cmd , res->_res );
+ void Future::commandThread( shared_ptr<CommandResult> res ){
+ setThreadName( "future" );
+
+ try {
+ ScopedDbConnection conn( res->_server );
+ res->_ok = conn->runCommand( res->_db , res->_cmd , res->_res );
+ conn.done();
+ }
+ catch ( std::exception& e ){
+ error() << "Future::commandThread exception: " << e.what() << endl;
+ res->_ok = false;
+ }
res->_done = true;
- conn.done();
}
shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd ){
- shared_ptr<Future::CommandResult> res;
- res.reset( new Future::CommandResult( server , db , cmd ) );
-
- _grab = &res;
-
- boost::thread thr( Future::commandThread );
-
- while ( _grab )
- sleepmicros(2);
-
+ shared_ptr<Future::CommandResult> res( new Future::CommandResult( server , db , cmd ) );
+ res->_thr.reset( new boost::thread( boost::bind( Future::commandThread , res ) ) );
return res;
}
-
- shared_ptr<Future::CommandResult> * Future::_grab;
}
diff --git a/client/parallel.h b/client/parallel.h
index b60190a..603cfe7 100644
--- a/client/parallel.h
+++ b/client/parallel.h
@@ -274,7 +274,7 @@ namespace mongo {
string _db;
BSONObj _cmd;
- boost::thread _thr;
+ scoped_ptr<boost::thread> _thr;
BSONObj _res;
bool _done;
@@ -283,12 +283,9 @@ namespace mongo {
friend class Future;
};
- static void commandThread();
+ static void commandThread( shared_ptr<CommandResult> res );
static shared_ptr<CommandResult> spawnCommand( const string& server , const string& db , const BSONObj& cmd );
-
- private:
- static shared_ptr<CommandResult> * _grab;
};
diff --git a/db/client.cpp b/db/client.cpp
index 9781041..f9653f5 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -35,16 +35,18 @@
namespace mongo {
+ Client* Client::syncThread;
mongo::mutex Client::clientsMutex("clientsMutex");
set<Client*> Client::clients; // always be in clientsMutex when manipulating this
boost::thread_specific_ptr<Client> currentClient;
- Client::Client(const char *desc) :
+ Client::Client(const char *desc, MessagingPort *p) :
_context(0),
_shutdown(false),
_desc(desc),
_god(0),
- _lastOp(0)
+ _lastOp(0),
+ _mp(p)
{
_curOp = new CurOp( this );
scoped_lock bl(clientsMutex);
@@ -52,15 +54,21 @@ namespace mongo {
}
Client::~Client() {
- delete _curOp;
_god = 0;
if ( _context )
- cout << "ERROR: Client::~Client _context should be NULL: " << _desc << endl;
- if ( !_shutdown )
- cout << "ERROR: Client::shutdown not called: " << _desc << endl;
- }
+ error() << "Client::~Client _context should be null but is not; client:" << _desc << endl;
+ if ( ! _shutdown ) {
+ error() << "Client::shutdown not called: " << _desc << endl;
+ }
+
+ scoped_lock bl(clientsMutex);
+ if ( ! _shutdown )
+ clients.erase(this);
+ delete _curOp;
+ }
+
void Client::_dropns( const string& ns ){
Top::global.collectionDropped( ns );
@@ -75,7 +83,7 @@ namespace mongo {
dropCollection( ns , err , b );
}
catch ( ... ){
- log() << "error dropping temp collection: " << ns << endl;
+ warning() << "error dropping temp collection: " << ns << endl;
}
}
@@ -196,12 +204,18 @@ namespace mongo {
if ( doauth )
_auth( lockState );
- if ( _client->_curOp->getOp() != dbGetMore ){ // getMore's are special and should be handled else where
+ switch ( _client->_curOp->getOp() ){
+ case dbGetMore: // getMore's are special and should be handled else where
+ case dbUpdate: // update & delete check shard version in instance.cpp, so don't check here as well
+ case dbDelete:
+ break;
+ default: {
string errmsg;
- if ( ! shardVersionOk( _ns , errmsg ) ){
+ if ( ! shardVersionOk( _ns , lockState > 0 , errmsg ) ){
msgasserted( StaleConfigInContextCode , (string)"[" + _ns + "] shard version not ok in Client::Context: " + errmsg );
}
}
+ }
}
void Client::Context::_auth( int lockState ){
@@ -237,7 +251,7 @@ namespace mongo {
string sayClientState(){
Client* c = currentClient.get();
- if ( ! c )
+ if ( !c )
return "no client";
return c->toString();
}
@@ -259,6 +273,15 @@ namespace mongo {
}
}
+ CurOp::~CurOp(){
+ if ( _wrapped ){
+ scoped_lock bl(Client::clientsMutex);
+ _client->_curOp = _wrapped;
+ }
+
+ _client = 0;
+ }
+
BSONObj CurOp::query( bool threadSafe ) {
if( querySize() == 1 ) {
return _tooBig;
diff --git a/db/client.h b/db/client.h
index 2456d7f..d0600e3 100644
--- a/db/client.h
+++ b/db/client.h
@@ -29,27 +29,33 @@
#include "namespace.h"
#include "lasterror.h"
#include "stats/top.h"
-//#include "repl/rs.h"
namespace mongo {
extern class ReplSet *theReplSet;
-
class AuthenticationInfo;
class Database;
class CurOp;
class Command;
class Client;
+ class MessagingPort;
extern boost::thread_specific_ptr<Client> currentClient;
class Client : boost::noncopyable {
public:
+ static Client *syncThread;
+ void iAmSyncThread() {
+ wassert( syncThread == 0 );
+ syncThread = this;
+ }
+ bool isSyncThread() const { return this == syncThread; } // true if this client is the replication secondary pull thread
+
static mongo::mutex clientsMutex;
static set<Client*> clients; // always be in clientsMutex when manipulating this
-
static int recommendedYieldMicros( int * writers = 0 , int * readers = 0 );
+ /* set _god=true temporarily, safely */
class GodScope {
bool _prev;
public:
@@ -148,9 +154,11 @@ namespace mongo {
}
friend class CurOp;
- };
+ }; // class Client::Context
private:
+ void _dropns( const string& ns );
+
CurOp * _curOp;
Context * _context;
bool _shutdown;
@@ -162,9 +170,9 @@ namespace mongo {
BSONObj _handshake;
BSONObj _remoteId;
- void _dropns( const string& ns );
-
public:
+ MessagingPort * const _mp;
+
string clientAddress() const;
AuthenticationInfo * getAuthenticationInfo(){ return &_ai; }
bool isAdmin() { return _ai.isAuthorized( "admin" ); }
@@ -174,24 +182,19 @@ namespace mongo {
const char *ns() const { return _context->ns(); }
const char *desc() const { return _desc; }
- Client(const char *desc);
+ Client(const char *desc, MessagingPort *p = 0);
~Client();
void addTempCollection( const string& ns );
void _invalidateDB(const string& db);
static void invalidateDB(const string& db);
-
static void invalidateNS( const string& ns );
- void setLastOp( ReplTime op ) {
- _lastOp = op;
- }
-
- ReplTime getLastOp() const {
- return _lastOp;
- }
+ void setLastOp( ReplTime op ) { _lastOp = op; }
+ ReplTime getLastOp() const { return _lastOp; }
+ /* report what the last operation was. used by getlasterror */
void appendLastOp( BSONObjBuilder& b ) {
if( theReplSet ) {
b.append("lastOp" , (long long) _lastOp);
@@ -206,14 +209,13 @@ namespace mongo {
/* each thread which does db operations has a Client object in TLS.
call this when your thread starts.
*/
- static void initThread(const char *desc);
+ static Client& initThread(const char *desc, MessagingPort *mp = 0);
/*
this has to be called as the client goes away, but before thread termination
@return true if anything was done
*/
bool shutdown();
-
/* this is for map/reduce writes */
bool isGod() const { return _god; }
@@ -221,13 +223,12 @@ namespace mongo {
friend class CurOp;
string toString() const;
-
void gotHandshake( const BSONObj& o );
-
BSONObj getRemoteID() const { return _remoteId; }
BSONObj getHandshake() const { return _handshake; }
};
+ /** get the Client object for this thread. */
inline Client& cc() {
Client * c = currentClient.get();
assert( c );
@@ -237,11 +238,13 @@ namespace mongo {
/* each thread which does db operations has a Client object in TLS.
call this when your thread starts.
*/
- inline void Client::initThread(const char *desc) {
+ inline Client& Client::initThread(const char *desc, MessagingPort *mp) {
setThreadName(desc);
assert( currentClient.get() == 0 );
- currentClient.reset( new Client(desc) );
+ Client *c = new Client(desc, mp);
+ currentClient.reset(c);
mongo::lastError.initThread();
+ return *c;
}
inline Client::GodScope::GodScope(){
@@ -276,8 +279,5 @@ namespace mongo {
string sayClientState();
- inline bool haveClient(){
- return currentClient.get() > 0;
- }
+ inline bool haveClient() { return currentClient.get() > 0; }
};
-
diff --git a/db/clientcursor.h b/db/clientcursor.h
index 6f79dcf..b895c17 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -138,17 +138,18 @@ namespace mongo {
/*const*/ CursorId cursorid;
const string ns;
const shared_ptr<Cursor> c;
- int pos; // # objects into the cursor so far
- BSONObj query;
+ int pos; // # objects into the cursor so far
+ const BSONObj query; // used for logging diags only; optional in constructor
const int _queryOptions; // see enum QueryOptions dbclient.h
OpTime _slaveReadTill;
Database * const _db;
- ClientCursor(int queryOptions, shared_ptr<Cursor>& _c, const string& _ns) :
+ ClientCursor(int queryOptions, shared_ptr<Cursor>& _c, const string& _ns, BSONObj _query = BSONObj()) :
_idleAgeMillis(0), _pinValue(0),
_doingDeletes(false), _yieldSometimesTracker(128,10),
ns(_ns), c(_c),
- pos(0), _queryOptions(queryOptions),
+ pos(0), query(_query),
+ _queryOptions(queryOptions),
_db( cc().database() )
{
assert( _db );
diff --git a/db/cloner.cpp b/db/cloner.cpp
index 96890bf..9177a00 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -49,7 +49,7 @@ namespace mongo {
void setConnection( DBClientWithCommands *c ) { conn.reset( c ); }
bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot);
- bool copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes = true );
+ bool copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes = true, bool logForRepl = true );
};
/* for index info object:
@@ -198,12 +198,12 @@ namespace mongo {
}
}
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string errmsg) {
+ bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logForRepl) {
Cloner c;
- return c.copyCollection(host, ns, query, errmsg , /*copyIndexes*/ true);
+ return c.copyCollection(host, ns, query, errmsg , /*copyIndexes*/ true, logForRepl);
}
- bool Cloner::copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes ){
+ bool Cloner::copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes, bool logForRepl ) {
auto_ptr<DBClientConnection> myconn;
myconn.reset( new DBClientConnection() );
if ( ! myconn->connect( from , errmsg ) )
@@ -223,12 +223,17 @@ namespace mongo {
}
{ // main data
- copy( ns.c_str() , ns.c_str() , false , true , false , true , Query(query).snapshot() );
+ copy( ns.c_str() , ns.c_str() , /*isindex*/false , logForRepl , false , true , Query(query).snapshot() );
}
+ /* TODO : copyIndexes bool does not seem to be implemented! */
+ if( !copyIndexes ) {
+ log() << "ERROR copy collection copyIndexes not implemented? " << ns << endl;
+ }
+
{ // indexes
string temp = ctx.db()->name + ".system.indexes";
- copy( temp.c_str() , temp.c_str() , true , true , false , true , BSON( "ns" << ns ) );
+ copy( temp.c_str() , temp.c_str() , /*isindex*/true , logForRepl , false , true , BSON( "ns" << ns ) );
}
return true;
}
diff --git a/db/commands.h b/db/commands.h
index eea4a71..a8a61c4 100644
--- a/db/commands.h
+++ b/db/commands.h
@@ -85,9 +85,7 @@ namespace mongo {
Note if run() returns false, we do NOT log.
*/
- virtual bool logTheOp() {
- return false;
- }
+ virtual bool logTheOp() { return false; }
virtual void help( stringstream& help ) const;
diff --git a/db/curop.h b/db/curop.h
index fbeda9f..bf06a69 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -222,10 +222,7 @@ namespace mongo {
memset(_queryBuf, 0, sizeof(_queryBuf));
}
- ~CurOp(){
- if ( _wrapped )
- _client->_curOp = _wrapped;
- }
+ ~CurOp();
BSONObj info() {
if( ! cc().getAuthenticationInfo()->isAuthorized("admin") ) {
diff --git a/db/db.cpp b/db/db.cpp
index 2b91956..d5b9339 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -67,7 +67,6 @@ namespace mongo {
#endif
void setupSignals();
- void closeAllSockets();
void startReplSets(ReplSetCmdline*);
void startReplication();
void pairWith(const char *remoteEnd, const char *arb);
@@ -104,7 +103,7 @@ namespace mongo {
virtual void accepted(MessagingPort *mp) {
if ( ! connTicketHolder.tryAcquire() ){
- log() << "connection refused because too many open connections: " << connTicketHolder.used() << endl;
+ log() << "connection refused because too many open connections: " << connTicketHolder.used() << " of " << connTicketHolder.outof() << endl;
// TODO: would be nice if we notified them...
mp->shutdown();
delete mp;
@@ -207,16 +206,14 @@ namespace mongo {
void connThread( MessagingPort * inPort )
{
TicketHolderReleaser connTicketReleaser( &connTicketHolder );
- Client::initThread("conn");
/* todo: move to Client object */
LastError *le = new LastError();
lastError.reset(le);
+ inPort->_logLevel = 1;
auto_ptr<MessagingPort> dbMsgPort( inPort );
-
- dbMsgPort->_logLevel = 1;
- Client& c = cc();
+ Client& c = Client::initThread("conn", inPort);
try {
@@ -522,7 +519,7 @@ sendmore:
l << ( is32bit ? " 32" : " 64" ) << "-bit " << endl;
}
DEV log() << "_DEBUG build (which is slower)" << endl;
- show_32_warning();
+ show_warnings();
log() << mongodVersion() << endl;
printGitVersion();
printSysInfo();
@@ -629,7 +626,7 @@ using namespace mongo;
namespace po = boost::program_options;
void show_help_text(po::options_description options) {
- show_32_warning();
+ show_warnings();
cout << options << endl;
};
@@ -1111,9 +1108,6 @@ int main(int argc, char* argv[], char *envp[] )
namespace mongo {
- /* we do not use log() below as it uses a mutex and that could cause deadlocks.
- */
-
string getDbContext();
#undef out
diff --git a/db/db.vcxproj b/db/db.vcxproj
index 61f81fe..0cabbd0 100644
--- a/db/db.vcxproj
+++ b/db/db.vcxproj
@@ -574,14 +574,26 @@
<ClCompile Include="repl\rs_config.cpp" />
</ItemGroup>
<ItemGroup>
- <None Include="..\jstests\rs\rs_basic.js" />
- <None Include="..\jstests\rs\test_framework.js" />
+ <None Include="..\jstests\replsets\replset1.js" />
+ <None Include="..\jstests\replsets\replset2.js" />
+ <None Include="..\jstests\replsets\replset3.js" />
+ <None Include="..\jstests\replsets\replset4.js" />
+ <None Include="..\jstests\replsets\replset5.js" />
+ <None Include="..\jstests\replsets\replsetadd.js" />
+ <None Include="..\jstests\replsets\replsetarb1.js" />
+ <None Include="..\jstests\replsets\replsetarb2.js" />
+ <None Include="..\jstests\replsets\replsetprio1.js" />
+ <None Include="..\jstests\replsets\replsetrestart1.js" />
+ <None Include="..\jstests\replsets\replsetrestart2.js" />
+ <None Include="..\jstests\replsets\replset_remove_node.js" />
+ <None Include="..\jstests\replsets\rollback.js" />
+ <None Include="..\jstests\replsets\rollback2.js" />
+ <None Include="..\jstests\replsets\sync1.js" />
+ <None Include="..\jstests\replsets\twosets.js" />
<None Include="..\SConstruct" />
<None Include="..\util\mongoutils\README" />
<None Include="mongo.ico" />
<None Include="repl\notes.txt" />
- <None Include="repl\test.html" />
- <None Include="repl\testing.js" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\client\dbclientcursor.h" />
diff --git a/db/db.vcxproj.filters b/db/db.vcxproj.filters
index f8d72a6..bf30b4e 100755
--- a/db/db.vcxproj.filters
+++ b/db/db.vcxproj.filters
@@ -840,7 +840,7 @@
<Filter Include="replSets">
<UniqueIdentifier>{3b73f786-d352-446f-a5f5-df49384baf7a}</UniqueIdentifier>
</Filter>
- <Filter Include="replSets\test stuff">
+ <Filter Include="replSets\testing">
<UniqueIdentifier>{4a1ea357-1077-4ad1-85b4-db48a6e1eb46}</UniqueIdentifier>
</Filter>
</ItemGroup>
@@ -851,24 +851,60 @@
<None Include="..\util\mongoutils\README">
<Filter>util\mongoutils</Filter>
</None>
- <None Include="repl\testing.js">
- <Filter>replSets\test stuff</Filter>
- </None>
- <None Include="repl\test.html">
- <Filter>replSets\test stuff</Filter>
- </None>
<None Include="..\SConstruct">
<Filter>db</Filter>
</None>
- <None Include="..\jstests\rs\rs_basic.js">
- <Filter>replSets\test stuff</Filter>
- </None>
- <None Include="..\jstests\rs\test_framework.js">
- <Filter>replSets\test stuff</Filter>
- </None>
<None Include="mongo.ico">
<Filter>Resource Files</Filter>
</None>
+ <None Include="..\jstests\replsets\replset_remove_node.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replset1.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replset2.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replset3.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replset4.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replset5.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replsetadd.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replsetarb1.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replsetarb2.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replsetprio1.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replsetrestart1.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\replsetrestart2.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\rollback.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\rollback2.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\sync1.js">
+ <Filter>replSets\testing</Filter>
+ </None>
+ <None Include="..\jstests\replsets\twosets.js">
+ <Filter>replSets\testing</Filter>
+ </None>
</ItemGroup>
<ItemGroup>
<Library Include="..\..\js\js64r.lib">
diff --git a/db/db_10.sln b/db/db_10.sln
index 0aa382f..d68d897 100644
--- a/db/db_10.sln
+++ b/db/db_10.sln
@@ -12,7 +12,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{2B262D59
..\tools\bsondump.cpp = ..\tools\bsondump.cpp
..\tools\dump.cpp = ..\tools\dump.cpp
..\tools\export.cpp = ..\tools\export.cpp
- ..\tools\files.cpp = ..\tools\files.cpp
..\tools\import.cpp = ..\tools\import.cpp
..\tools\restore.cpp = ..\tools\restore.cpp
..\tools\sniffer.cpp = ..\tools\sniffer.cpp
@@ -29,9 +28,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "unix files", "unix files",
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "shell", "shell", "{407B4B88-3451-433C-B74F-31B31FEB5791}"
- ProjectSection(SolutionItems) = preProject
- ..\shell\utils.h = ..\shell\utils.h
- EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "other", "other", "{12B11474-2D74-48C3-BB3D-F03249BEA88F}"
EndProject
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 22b0457..96374d9 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -74,6 +74,14 @@ namespace mongo {
}
} cmdResetError;
+ /* set by replica sets if specified in the configuration.
+ a pointer is used to avoid any possible locking issues with lockless reading (see below locktype() is NONE
+ and would like to keep that)
+ (for now, it simply orphans any old copy as config changes should be extremely rare).
+ note: once non-null, never goes to null again.
+ */
+ BSONObj *getLastErrorDefault = 0;
+
class CmdGetLastError : public Command {
public:
virtual LockType locktype() const { return NONE; }
@@ -88,7 +96,7 @@ namespace mongo {
help << "return error status of the last operation on this connection";
}
CmdGetLastError() : Command("getLastError", false, "getlasterror") {}
- bool run(const string& dbnamne, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(const string& dbnamne, BSONObj& _cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.disableForCommand();
if ( le->nPrev != 1 )
LastError::noError.appendSelf( result );
@@ -98,6 +106,18 @@ namespace mongo {
Client& c = cc();
c.appendLastOp( result );
+ BSONObj cmdObj = _cmdObj;
+ {
+ BSONObj::iterator i(_cmdObj);
+ i.next();
+ if( !i.more() ) {
+ /* empty, use default */
+ BSONObj *def = getLastErrorDefault;
+ if( def )
+ cmdObj = *def;
+ }
+ }
+
if ( cmdObj["fsync"].trueValue() ){
log() << "fsync from getlasterror" << endl;
result.append( "fsyncFiles" , MemoryMappedFile::flushAll( true ) );
@@ -389,8 +409,11 @@ namespace mongo {
if ( ! authed )
result.append( "note" , "run against admin for more info" );
- if ( Listener::getElapsedTimeMillis() - start > 1000 )
- result.append( "timing" , timeBuilder.obj() );
+ if ( Listener::getElapsedTimeMillis() - start > 1000 ){
+ BSONObj t = timeBuilder.obj();
+ log() << "serverStatus was very slow: " << t << endl;
+ result.append( "timing" , t );
+ }
return true;
}
@@ -690,12 +713,8 @@ namespace mongo {
class CmdReIndex : public Command {
public:
- virtual bool logTheOp() {
- return true;
- }
- virtual bool slaveOk() const {
- return false;
- }
+ virtual bool logTheOp() { return false; } // only reindexes on the one node
+ virtual bool slaveOk() const { return true; } // can reindex on a secondary
virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream& help ) const {
help << "re-index a collection";
@@ -745,9 +764,6 @@ namespace mongo {
class CmdListDatabases : public Command {
public:
- virtual bool logTheOp() {
- return false;
- }
virtual bool slaveOk() const {
return true;
}
@@ -925,15 +941,34 @@ namespace mongo {
"\nnote: This command may take a while to run";
}
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ Timer timer;
+
string ns = jsobj.firstElement().String();
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+ bool estimate = jsobj["estimate"].trueValue();
Client::Context ctx( ns );
+ NamespaceDetails *d = nsdetails(ns.c_str());
+
+ if ( ! d || d->nrecords == 0 ){
+ result.appendNumber( "size" , 0 );
+ result.appendNumber( "numObjects" , 0 );
+ result.append( "millis" , timer.millis() );
+ return true;
+ }
+ result.appendBool( "estimate" , estimate );
+
shared_ptr<Cursor> c;
if ( min.isEmpty() && max.isEmpty() ) {
+ if ( estimate ){
+ result.appendNumber( "size" , d->datasize );
+ result.appendNumber( "numObjects" , d->nrecords );
+ result.append( "millis" , timer.millis() );
+ return 1;
+ }
c = theDataFileMgr.findAll( ns.c_str() );
}
else if ( min.isEmpty() || max.isEmpty() ) {
@@ -944,18 +979,24 @@ namespace mongo {
IndexDetails *idx = cmdIndexDetailsForRange( ns.c_str(), errmsg, min, max, keyPattern );
if ( idx == 0 )
return false;
- NamespaceDetails *d = nsdetails(ns.c_str());
+
c.reset( new BtreeCursor( d, d->idxNo(*idx), *idx, min, max, false, 1 ) );
}
+ long long avgObjSize = d->datasize / d->nrecords;
+
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
- Timer timer;
long long size = 0;
long long numObjects = 0;
while( c->ok() ) {
- size += c->currLoc().rec()->netLength();
+
+ if ( estimate )
+ size += avgObjSize;
+ else
+ size += c->currLoc().rec()->netLength();
+
numObjects++;
if ( ( maxSize && size > maxSize ) ||
@@ -974,8 +1015,8 @@ namespace mongo {
}
logIfSlow( timer , os.str() );
- result.append( "size", (double)size );
- result.append( "numObjects" , (double)numObjects );
+ result.appendNumber( "size", size );
+ result.appendNumber( "numObjects" , numObjects );
result.append( "millis" , timer.millis() );
return true;
}
@@ -1555,9 +1596,6 @@ namespace mongo {
class CmdWhatsMyUri : public Command {
public:
CmdWhatsMyUri() : Command("whatsmyuri") { }
- virtual bool logTheOp() {
- return false; // the modification will be logged directly
- }
virtual bool slaveOk() const {
return true;
}
@@ -1693,12 +1731,8 @@ namespace mongo {
public:
virtual LockType locktype() const { return NONE; }
virtual bool adminOnly() const { return true; }
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
virtual void help( stringstream& help ) const {
help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
help << "w:true write lock";
@@ -1798,7 +1832,7 @@ namespace mongo {
if ( c->adminOnly() && ! fromRepl && dbname != "admin" ) {
- result.append( "errmsg" , "access denied- use admin db" );
+ result.append( "errmsg" , "access denied; use admin db" );
log() << "command denied: " << cmdObj.toString() << endl;
return false;
}
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index 340f31c..25c6a93 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -196,8 +196,9 @@ namespace mongo {
CmdShutdown() : Command("shutdown") {}
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
Client * c = currentClient.get();
- if ( c )
+ if ( c ) {
c->shutdown();
+ }
log() << "terminating, shutdown command received" << endl;
dbexit( EXIT_CLEAN ); // this never returns
return true;
diff --git a/db/instance.cpp b/db/instance.cpp
index ec3b793..9e81464 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -62,7 +62,6 @@ namespace mongo {
bool useCursors = true;
bool useHints = true;
- void closeAllSockets();
void flushOpLog( stringstream &ss ) {
if( _diaglog.f && _diaglog.f->is_open() ) {
ss << "flushing op log and files\n";
@@ -332,9 +331,10 @@ namespace mongo {
}
}
catch ( AssertionException& e ) {
- tlog() << " Caught Assertion in " << opToString(op) << " , continuing" << endl;
+ static int n;
+ tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing" << endl;
ss << " exception " + e.toString();
- log = true;
+ log = ++n < 10;
}
}
}
@@ -446,6 +446,7 @@ namespace mongo {
mongolock lk(1);
+ // if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
return;
@@ -472,8 +473,10 @@ namespace mongo {
}
writelock lk(ns);
+ // if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
if ( ! broadcast & handlePossibleShardedMessage( m , 0 ) )
return;
+
Client::Context ctx(ns);
long long n = deleteObjects(ns, pattern, justOne, true);
@@ -709,32 +712,32 @@ namespace mongo {
}
catch (...) { }
- tryToOutputFatal( "dbexit: really exiting now\n" );
+ tryToOutputFatal( "dbexit: really exiting now" );
if ( c ) c->shutdown();
::exit(rc);
}
void shutdown() {
- log() << "\t shutdown: going to close listening sockets..." << endl;
+ log() << "shutdown: going to close listening sockets..." << endl;
ListeningSockets::get()->closeAll();
- log() << "\t shutdown: going to flush oplog..." << endl;
+ log() << "shutdown: going to flush oplog..." << endl;
stringstream ss2;
flushOpLog( ss2 );
rawOut( ss2.str() );
/* must do this before unmapping mem or you may get a seg fault */
- log() << "\t shutdown: going to close sockets..." << endl;
- boost::thread close_socket_thread(closeAllSockets);
+ log() << "shutdown: going to close sockets..." << endl;
+ boost::thread close_socket_thread( boost::bind(MessagingPort::closeAllSockets, 0) );
// wait until file preallocation finishes
// we would only hang here if the file_allocator code generates a
// synchronous signal, which we don't expect
- log() << "\t shutdown: waiting for fs preallocator..." << endl;
+ log() << "shutdown: waiting for fs preallocator..." << endl;
theFileAllocator().waitUntilFinished();
- log() << "\t shutdown: closing all files..." << endl;
+ log() << "shutdown: closing all files..." << endl;
stringstream ss3;
MemoryMappedFile::closeAllFiles( ss3 );
rawOut( ss3.str() );
@@ -744,9 +747,9 @@ namespace mongo {
#if !defined(_WIN32) && !defined(__sunos__)
if ( lockFile ){
- log() << "\t shutdown: removing fs lock..." << endl;
+ log() << "shutdown: removing fs lock..." << endl;
if( ftruncate( lockFile , 0 ) )
- log() << "\t couldn't remove fs lock " << errnoWithDescription() << endl;
+ log() << "couldn't remove fs lock " << errnoWithDescription() << endl;
flock( lockFile, LOCK_UN );
}
#endif
@@ -766,12 +769,14 @@ namespace mongo {
bool oldFile = false;
- if ( boost::filesystem::exists( name ) && boost::filesystem::file_size( name ) > 0 ){
+ if ( boost::filesystem::exists( name ) && boost::filesystem::file_size( name ) > 0 ) {
oldFile = true;
}
lockFile = open( name.c_str(), O_RDWR | O_CREAT , S_IRWXU | S_IRWXG | S_IRWXO );
- uassert( 10309 , "Unable to create / open lock file for lockfilepath: " + name, lockFile > 0 );
+ if( lockFile <= 0 ) {
+ uasserted( 10309 , str::stream() << "Unable to create / open lock file for lockfilepath: " << name << ' ' << errnoWithDescription());
+ }
if (flock( lockFile, LOCK_EX | LOCK_NB ) != 0) {
close ( lockFile );
lockFile = 0;
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 630fcfb..12fc694 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -34,8 +34,11 @@ namespace mongo {
void raiseError(int code , const char *msg) {
LastError *le = lastError.get();
if ( le == 0 ) {
- /* might be intentional (non-user thread) */
- OCCASIONALLY DEV if( !isShell ) log() << "warning dev: lastError==0 won't report:" << msg << endl;
+ /* might be intentional (non-user thread) */
+ DEV {
+ static unsigned n;
+ if( ++n < 4 && !isShell ) log() << "dev: lastError==0 won't report:" << msg << endl;
+ }
} else if ( le->disabled ) {
log() << "lastError disabled, can't report: " << code << ":" << msg << endl;
} else {
diff --git a/db/matcher.cpp b/db/matcher.cpp
index 681a6dc..cd62563 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -259,7 +259,7 @@ namespace mongo {
}
void Matcher::parseOr( const BSONElement &e, bool subMatcher, list< shared_ptr< Matcher > > &matchers ) {
- uassert( 13090, "recursive $or/$nor not allowed", !subMatcher );
+ uassert( 13090, "nested $or/$nor not allowed", !subMatcher );
uassert( 13086, "$or/$nor must be a nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
BSONObjIterator j( e.embeddedObject() );
while( j.more() ) {
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 4ad4ca9..93800c7 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -142,8 +142,10 @@ namespace mongo {
this code (or code in now() maybe) should be improved.
*/
if( theReplSet ) {
- if( !(theReplSet->lastOpTimeWritten<ts) )
- log() << "replSet ERROR possible failover clock skew issue? " << theReplSet->lastOpTimeWritten << ' ' << ts << endl;
+ if( !(theReplSet->lastOpTimeWritten<ts) ) {
+ log() << "replSet ERROR possible failover clock skew issue? " << theReplSet->lastOpTimeWritten << ' ' << ts << rsLog;
+ log() << "replSet " << theReplSet->isPrimary() << rsLog;
+ }
theReplSet->lastOpTimeWritten = ts;
theReplSet->lastH = hNew;
ctx.getClient()->setLastOp( ts.asDate() );
diff --git a/db/oplog.h b/db/oplog.h
index d1e4990..34c345f 100644
--- a/db/oplog.h
+++ b/db/oplog.h
@@ -195,7 +195,7 @@ namespace mongo {
// Use a ClientCursor here so we can release db mutex while scanning
// oplog (can take quite a while with large oplogs).
shared_ptr<Cursor> c = _qp.newReverseCursor();
- _findingStartCursor = new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns());
+ _findingStartCursor = new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns(), BSONObj());
_findingStartTimer.reset();
_findingStartMode = Initial;
BSONElement tsElt = _qp.originalQuery()[ "ts" ];
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index cf7cb22..216f21a 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -56,8 +56,6 @@ namespace mongo {
}
};
- const int MaxExtentSize = 0x7ff00000;
-
map<string, unsigned> BackgroundOperation::dbsInProg;
set<string> BackgroundOperation::nsInProg;
@@ -157,7 +155,7 @@ namespace mongo {
sz = 1000000000;
int z = ((int)sz) & 0xffffff00;
assert( z > len );
- DEV tlog() << "initialExtentSize(" << len << ") returns " << z << endl;
+ //DEV tlog() << "initialExtentSize(" << len << ") returns " << z << endl;
return z;
}
@@ -272,10 +270,19 @@ namespace mongo {
/*---------------------------------------------------------------------*/
int MongoDataFile::maxSize() {
- if ( sizeof( int* ) == 4 )
+ if ( sizeof( int* ) == 4 ) {
return 512 * 1024 * 1024;
- else
+ } else if ( cmdLine.smallfiles ) {
+ return 0x7ff00000 >> 2;
+ } else {
return 0x7ff00000;
+ }
+ }
+
+ void MongoDataFile::badOfs(int ofs) const {
+ stringstream ss;
+ ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
+ uasserted(13440, ss.str());
}
int MongoDataFile::defaultSize( const char *filename ) const {
@@ -380,7 +387,7 @@ namespace mongo {
Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
massert( 10357 , "shutdown in progress", !goingAway );
- massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= MaxExtentSize );
+ massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= Extent::maxSize() );
massert( 10359 , "header==0 on new extent: 32 bit mmap space exceeded?", header ); // null if file open failed
int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
DiskLoc loc;
@@ -403,8 +410,8 @@ namespace mongo {
addNewExtentToNamespace(ns, e, loc, emptyLoc, newCapped);
- DEV tlog() << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
- << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
+ DEV tlog(1) << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
+ << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
return e;
}
@@ -568,6 +575,14 @@ namespace mongo {
}
*/
+ int Extent::maxSize() {
+ int maxExtentSize = 0x7ff00000;
+ if ( cmdLine.smallfiles ) {
+ maxExtentSize >>= 2;
+ }
+ return maxExtentSize;
+ }
+
/*---------------------------------------------------------------------*/
shared_ptr<Cursor> DataFileMgr::findAll(const char *ns, const DiskLoc &startLoc) {
@@ -897,7 +912,7 @@ namespace mongo {
if ( toupdate->netLength() < objNew.objsize() ) {
// doesn't fit. reallocate -----------------------------------------------------
- uassert( 10003 , "E10003 failing update: objects in a capped ns cannot grow", !(d && d->capped));
+ uassert( 10003 , "failing update: objects in a capped ns cannot grow", !(d && d->capped));
d->paddingTooSmall();
if ( cc().database()->profile )
ss << " moved ";
@@ -950,15 +965,15 @@ namespace mongo {
}
int followupExtentSize(int len, int lastExtentLen) {
- assert( len < MaxExtentSize );
+ assert( len < Extent::maxSize() );
int x = initialExtentSize(len);
int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
int sz = y > x ? y : x;
if ( sz < lastExtentLen )
sz = lastExtentLen;
- else if ( sz > MaxExtentSize )
- sz = MaxExtentSize;
+ else if ( sz > Extent::maxSize() )
+ sz = Extent::maxSize();
sz = ((int)sz) & 0xffffff00;
assert( sz > len );
@@ -1029,7 +1044,7 @@ namespace mongo {
Timer t;
- tlog() << "Buildindex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;
+ tlog(1) << "fastBuildIndex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;
bool dupsAllowed = !idx.unique();
bool dropDups = idx.dropDups() || inDBRepair;
@@ -1514,6 +1529,13 @@ namespace mongo {
BSONObj info = loc.obj();
bool background = info["background"].trueValue();
+ if( background && cc().isSyncThread() ) {
+ /* don't do background indexing on slaves. there are nuances. this could be added later
+ but requires more code.
+ */
+ log() << "info: indexing in foreground on this replica; was a background index build on the primary" << endl;
+ background = false;
+ }
int idxNo = tableToIndex->nIndexes;
IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
diff --git a/db/pdfile.h b/db/pdfile.h
index 084a542..d268aac 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -79,6 +79,8 @@ namespace mongo {
void flush( bool sync );
private:
+ void badOfs(int) const;
+
int defaultSize( const char *filename ) const;
Extent* getExtent(DiskLoc loc);
@@ -255,6 +257,8 @@ namespace mongo {
Extent* getPrevExtent() {
return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev);
}
+
+ static int maxSize();
};
/*
@@ -339,7 +343,7 @@ namespace mongo {
inline Record* MongoDataFile::recordAt(DiskLoc dl) {
int ofs = dl.getOfs();
- assert( ofs >= DataFileHeader::HeaderSize );
+ if( ofs < DataFileHeader::HeaderSize ) badOfs(ofs); // will uassert - external call to keep out of the normal code path
return (Record*) _p.at(ofs, -1);
}
diff --git a/db/query.cpp b/db/query.cpp
index bfe845c..5bd7b00 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -1044,14 +1044,13 @@ namespace mongo {
if ( moreClauses ) {
// this MultiCursor will use a dumb NoOp to advance(), so no need to specify mayYield
shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher(), dqo ) );
- cc = new ClientCursor(queryOptions, multi, ns);
+ cc = new ClientCursor(queryOptions, multi, ns, jsobj.getOwned());
} else {
cursor->setMatcher( dqo.matcher() );
- cc = new ClientCursor( queryOptions, cursor, ns );
+ cc = new ClientCursor( queryOptions, cursor, ns, jsobj.getOwned() );
}
cursorid = cc->cursorid;
- cc->query = jsobj.getOwned();
- DEV tlog() << "query has more, cursorid: " << cursorid << endl;
+ DEV tlog(2) << "query has more, cursorid: " << cursorid << endl;
cc->pos = n;
cc->pq = pq_shared;
cc->fields = pq.getFieldPtr();
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 3d4cbd0..e7068c2 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -361,7 +361,7 @@ namespace mongo {
const IndexSpec& spec = ii.getSpec();
if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , order_ ) ){
usingPrerecordedPlan_ = true;
- mayRecordPlan_ = true;
+ mayRecordPlan_ = false;
plans_.push_back( PlanPtr( new QueryPlan( d , j , *fbs_ , _originalQuery, order_ ,
BSONObj() , BSONObj() , _special ) ) );
return;
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index 791096f..007a1ce 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -944,6 +944,8 @@ namespace mongo {
return ( l % 2 == 0 ); // if we're inside an interval
}
+ // binary search for interval containing the specified element
+ // an even return value indicates that the element is contained within a valid interval
int FieldRangeVector::matchingLowElement( const BSONElement &e, int i, bool forward ) const {
int l = -1;
int h = _ranges[ i ].intervals().size() * 2;
@@ -1007,9 +1009,13 @@ namespace mongo {
int FieldRangeVector::Iterator::advance( const BSONObj &curr ) {
BSONObjIterator j( curr );
BSONObjIterator o( _v._keyPattern );
+ // track first field for which we are not at the end of the valid values,
+ // since we may need to advance from the key prefix ending with this field
int latestNonEndpoint = -1;
+ // iterate over fields to determine appropriate advance method
for( int i = 0; i < (int)_i.size(); ++i ) {
if ( i > 0 && !_v._ranges[ i - 1 ].intervals()[ _i[ i - 1 ] ].equality() ) {
+ // if last bound was inequality, we don't know anything about where we are for this field
// TODO if possible avoid this certain cases when field in prev key is the same
setMinus( i );
}
@@ -1017,9 +1023,9 @@ namespace mongo {
BSONElement oo = o.next();
bool reverse = ( ( oo.number() < 0 ) ^ ( _v._direction < 0 ) );
BSONElement jj = j.next();
- if ( _i[ i ] == -1 ) {
+ if ( _i[ i ] == -1 ) { // unknown position for this field, do binary search
int l = _v.matchingLowElement( jj, i, !reverse );
- if ( l % 2 == 0 ) {
+ if ( l % 2 == 0 ) { // we are in a valid range for this field
_i[ i ] = l / 2;
int diff = (int)_v._ranges[ i ].intervals().size() - _i[ i ];
if ( diff > 1 ) {
@@ -1031,7 +1037,8 @@ namespace mongo {
}
}
continue;
- } else {
+ } else { // not in a valid range for this field - determine if and how to advance
+ // check if we're after the last interval for this field
if ( l == (int)_v._ranges[ i ].intervals().size() * 2 - 1 ) {
if ( latestNonEndpoint == -1 ) {
return -2;
@@ -1053,7 +1060,11 @@ namespace mongo {
}
}
bool first = true;
+ // _i[ i ] != -1, so we have a starting interval for this field
+ // which serves as a lower/equal bound on the first iteration -
+ // we advance from this interval to find a matching interval
while( _i[ i ] < (int)_v._ranges[ i ].intervals().size() ) {
+ // compare to current interval's upper bound
int x = _v._ranges[ i ].intervals()[ _i[ i ] ]._upper._bound.woCompare( jj, false );
if ( reverse ) {
x = -x;
@@ -1062,16 +1073,22 @@ namespace mongo {
eq = true;
break;
}
+ // see if we're less than the upper bound
if ( x > 0 ) {
if ( i == 0 && first ) {
- break; // the value of 1st field won't go backward
+ // the value of 1st field won't go backward, so don't check lower bound
+ // TODO maybe we can check first only?
+ break;
}
+ // if it's an equality interval, don't need to compare separately to lower bound
if ( !_v._ranges[ i ].intervals()[ _i[ i ] ].equality() ) {
+ // compare to current interval's lower bound
x = _v._ranges[ i ].intervals()[ _i[ i ] ]._lower._bound.woCompare( jj, false );
if ( reverse ) {
x = -x;
}
}
+ // if we're less than the lower bound, advance
if ( x > 0 ) {
setZero( i + 1 );
// skip to curr / i / nextbounds
@@ -1084,17 +1101,20 @@ namespace mongo {
break;
}
}
+ // we're above the upper bound, so try next interval and reset remaining fields
++_i[ i ];
setZero( i + 1 );
first = false;
}
int diff = (int)_v._ranges[ i ].intervals().size() - _i[ i ];
if ( diff > 1 || ( !eq && diff == 1 ) ) {
+ // check if we're not at the end of valid values for this field
latestNonEndpoint = i;
- } else if ( diff == 0 ) {
+ } else if ( diff == 0 ) { // check if we're past the last interval for this field
if ( latestNonEndpoint == -1 ) {
return -2;
}
+ // more values possible, skip...
setZero( latestNonEndpoint + 1 );
// skip to curr / latestNonEndpoint + 1 / superlative
for( int j = latestNonEndpoint + 1; j < (int)_i.size(); ++j ) {
diff --git a/db/repl.cpp b/db/repl.cpp
index 37197ba..085ae64 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -137,9 +137,6 @@ namespace mongo {
virtual bool adminOnly() const {
return true;
}
- virtual bool logTheOp() {
- return false;
- }
virtual LockType locktype() const { return WRITE; }
void help(stringstream&h) const { h << "replace a node in a replica pair"; }
CmdReplacePeer() : Command("replacePeer", false, "replacepeer") { }
@@ -199,9 +196,6 @@ namespace mongo {
virtual bool adminOnly() const {
return true;
}
- virtual bool logTheOp() {
- return false;
- }
virtual void help(stringstream& h) const { h << "internal"; }
virtual LockType locktype() const { return WRITE; }
CmdForceDead() : Command("forcedead") { }
@@ -222,9 +216,7 @@ namespace mongo {
virtual bool adminOnly() const {
return true;
}
- virtual bool logTheOp() {
- return false;
- }
+ virtual bool logTheOp() { return false; }
virtual LockType locktype() const { return WRITE; }
void help(stringstream&h) const { h << "resync (from scratch) an out of date replica slave.\nhttp://www.mongodb.org/display/DOCS/Master+Slave"; }
CmdResync() : Command("resync") { }
@@ -861,7 +853,6 @@ namespace mongo {
if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
log(6) << "processing op: " << op << endl;
- // skip no-op
if( op.getStringField("op")[0] == 'n' )
return;
@@ -1260,8 +1251,14 @@ namespace mongo {
if ( tailing || initial ) {
if ( initial )
log(1) << "repl: initial run\n";
- else
- assert( syncedTo < nextOpTime );
+ else {
+ if( !( syncedTo <= nextOpTime ) ) {
+ log() << "repl ASSERTION failed : syncedTo <= nextOpTime" << endl;
+ log() << "repl syncTo: " << syncedTo.toStringLong() << endl;
+ log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
+ assert(false);
+ }
+ }
oplogReader.putBack( op ); // op will be processed in the loop below
nextOpTime = OpTime(); // will reread the op below
}
@@ -1690,6 +1687,7 @@ namespace mongo {
void replSlaveThread() {
sleepsecs(1);
Client::initThread("replslave");
+ cc().iAmSyncThread();
{
dblock lk;
diff --git a/db/repl/consensus.cpp b/db/repl/consensus.cpp
index 4eba17d..4044538 100644
--- a/db/repl/consensus.cpp
+++ b/db/repl/consensus.cpp
@@ -83,6 +83,17 @@ namespace mongo {
return vUp * 2 > totalVotes();
}
+ bool Consensus::shouldRelinquish() const {
+ int vUp = rs._self->config().votes;
+ const long long T = rs.config().ho.heartbeatTimeoutMillis * rs.config().ho.heartbeatConnRetries;
+ for( Member *m = rs.head(); m; m=m->next() ) {
+ long long dt = m->hbinfo().timeDown();
+ if( dt < T )
+ vUp += m->config().votes;
+ }
+ return !( vUp * 2 > totalVotes() );
+ }
+
static const int VETO = -10000;
const time_t LeaseTime = 30;
@@ -322,6 +333,7 @@ namespace mongo {
void Consensus::electSelf() {
assert( !rs.lockedByMe() );
assert( !rs.myConfig().arbiterOnly );
+ assert( rs.myConfig().slaveDelay == 0 );
try {
_electSelf();
}
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index b0be25f..72396fe 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -89,11 +89,13 @@ namespace mongo {
}
s << td(config().votes);
{
- string stateText = ReplSet::stateAsStr(state());
+ string stateText = state().toString();
+ if( _config.hidden )
+ stateText += " (hidden)";
if( ok || stateText.empty() )
s << td(stateText); // text blank if we've never connected
else
- s << td( grey(str::stream() << "(was " << ReplSet::stateAsStr(state()) << ')', true) );
+ s << td( grey(str::stream() << "(was " << state().toString() << ')', true) );
}
s << td( grey(hbinfo().lastHeartbeatMsg,!ok) );
stringstream q;
@@ -105,28 +107,30 @@ namespace mongo {
s << td("");
s << _tr();
}
-
+
string ReplSetImpl::stateAsHtml(MemberState s) {
if( s.s == MemberState::RS_STARTUP ) return a("", "serving still starting up, or still trying to initiate the set", "STARTUP");
if( s.s == MemberState::RS_PRIMARY ) return a("", "this server thinks it is primary", "PRIMARY");
if( s.s == MemberState::RS_SECONDARY ) return a("", "this server thinks it is a secondary (slave mode)", "SECONDARY");
if( s.s == MemberState::RS_RECOVERING ) return a("", "recovering/resyncing; after recovery usually auto-transitions to secondary", "RECOVERING");
- if( s.s == MemberState::RS_FATAL ) return a("", "something bad has occurred and server is not completely offline with regard to the replica set. fatal error.", "RS_FATAL");
- if( s.s == MemberState::RS_STARTUP2 ) return a("", "loaded config, still determining who is primary", "RS_STARTUP2");
+ if( s.s == MemberState::RS_FATAL ) return a("", "something bad has occurred and server is not completely offline with regard to the replica set. fatal error.", "FATAL");
+ if( s.s == MemberState::RS_STARTUP2 ) return a("", "loaded config, still determining who is primary", "STARTUP2");
if( s.s == MemberState::RS_ARBITER ) return a("", "this server is an arbiter only", "ARBITER");
if( s.s == MemberState::RS_DOWN ) return a("", "member is down, slow, or unreachable", "DOWN");
+ if( s.s == MemberState::RS_ROLLBACK ) return a("", "rolling back operations to get in sync", "ROLLBACK");
return "";
}
- string ReplSetImpl::stateAsStr(MemberState s) {
- if( s.s == MemberState::RS_STARTUP ) return "STARTUP";
- if( s.s == MemberState::RS_PRIMARY ) return "PRIMARY";
- if( s.s == MemberState::RS_SECONDARY ) return "SECONDARY";
- if( s.s == MemberState::RS_RECOVERING ) return "RECOVERING";
- if( s.s == MemberState::RS_FATAL ) return "FATAL";
- if( s.s == MemberState::RS_STARTUP2 ) return "STARTUP2";
- if( s.s == MemberState::RS_ARBITER ) return "ARBITER";
- if( s.s == MemberState::RS_DOWN ) return "DOWN";
+ string MemberState::toString() const {
+ if( s == MemberState::RS_STARTUP ) return "STARTUP";
+ if( s == MemberState::RS_PRIMARY ) return "PRIMARY";
+ if( s == MemberState::RS_SECONDARY ) return "SECONDARY";
+ if( s == MemberState::RS_RECOVERING ) return "RECOVERING";
+ if( s == MemberState::RS_FATAL ) return "FATAL";
+ if( s == MemberState::RS_STARTUP2 ) return "STARTUP2";
+ if( s == MemberState::RS_ARBITER ) return "ARBITER";
+ if( s == MemberState::RS_DOWN ) return "DOWN";
+ if( s == MemberState::RS_ROLLBACK ) return "ROLLBACK";
return "";
}
@@ -302,7 +306,7 @@ namespace mongo {
td(ago(started)) <<
td("") << // last heartbeat
td(ToString(_self->config().votes)) <<
- td(stateAsHtml(box.getState()));
+ td( stateAsHtml(box.getState()) + (_self->config().hidden?" (hidden)":"") );
s << td( _hbmsg );
stringstream q;
q << "/_replSetOplog?" << _self->id();
diff --git a/db/repl/health.h b/db/repl/health.h
index 8b1005e..645a3b5 100644
--- a/db/repl/health.h
+++ b/db/repl/health.h
@@ -27,7 +27,7 @@ namespace mongo {
HealthOptions() {
heartbeatSleepMillis = 2000;
heartbeatTimeoutMillis = 10000;
- heartbeatConnRetries = 3;
+ heartbeatConnRetries = 2;
}
bool isDefault() const { return *this == HealthOptions(); }
diff --git a/db/repl/heartbeat.cpp b/db/repl/heartbeat.cpp
index 78ce5d1..4f28897 100644
--- a/db/repl/heartbeat.cpp
+++ b/db/repl/heartbeat.cpp
@@ -40,6 +40,13 @@ namespace mongo {
// hacky
string *discoveredSeed = 0;
+ long long HeartbeatInfo::timeDown() const {
+ if( up() ) return 0;
+ if( downSince == 0 )
+ return 0; // still waiting on first heartbeat
+ return jsTime() - downSince;
+ }
+
/* { replSetHeartbeat : <setname> } */
class CmdReplSetHeartbeat : public ReplSetCommand {
public:
@@ -55,6 +62,14 @@ namespace mongo {
errmsg = "not running with --replSet";
return false;
}
+
+ /* we want to keep heartbeat connections open when relinquishing primary. tag them here. */
+ {
+ MessagingPort *mp = cc()._mp;
+ if( mp )
+ mp->tag |= 1;
+ }
+
if( cmdObj["pv"].Int() != 1 ) {
errmsg = "incompatible replset protocol version";
return false;
@@ -91,7 +106,7 @@ namespace mongo {
result.append("set", theReplSet->name());
result.append("state", theReplSet->state().s);
result.append("hbmsg", theReplSet->hbmsg());
- result.append("time", (int) time(0));
+ result.append("time", (long long) time(0));
result.appendDate("opTime", theReplSet->lastOpTimeWritten.asDate());
int v = theReplSet->config().version;
result.append("v", v);
@@ -196,7 +211,12 @@ namespace mongo {
static time_t last = 0;
time_t now = time(0);
- if( mem.changed(old) || now-last>4 ) {
+ bool changed = mem.changed(old);
+ if( changed ) {
+ if( old.hbstate != mem.hbstate )
+ log() << "replSet " << h.toString() << ' ' << mem.hbstate.toString() << rsLog;
+ }
+ if( changed || now-last>4 ) {
last = now;
theReplSet->mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
}
@@ -205,8 +225,9 @@ namespace mongo {
private:
void down(HeartbeatInfo& mem, string msg) {
mem.health = 0.0;
- if( mem.upSince ) {
+ if( mem.upSince || mem.downSince == 0 ) {
mem.upSince = 0;
+ mem.downSince = jsTime();
log() << "replSet info " << h.toString() << " is now down (or slow to respond)" << rsLog;
}
mem.lastHeartbeatMsg = msg;
diff --git a/db/repl/manager.cpp b/db/repl/manager.cpp
index e870688..862ac46 100644
--- a/db/repl/manager.cpp
+++ b/db/repl/manager.cpp
@@ -29,12 +29,17 @@ namespace mongo {
};
/* check members OTHER THAN US to see if they think they are primary */
- const Member * Manager::findOtherPrimary() {
+ const Member * Manager::findOtherPrimary(bool& two) {
+ two = false;
Member *m = rs->head();
Member *p = 0;
while( m ) {
+ DEV assert( m != rs->_self );
if( m->state().primary() && m->hbinfo().up() ) {
- if( p ) throw "twomasters"; // our polling is asynchronous, so this is often ok.
+ if( p ) {
+ two = true;
+ return 0;
+ }
p = m;
}
m = m->next();
@@ -63,7 +68,11 @@ namespace mongo {
if( rs->box.getPrimary() == m )
return;
rs->_self->lhb() = "";
- rs->box.set(rs->iAmArbiterOnly() ? MemberState::RS_ARBITER : MemberState::RS_RECOVERING, m);
+ if( rs->iAmArbiterOnly() ) {
+ rs->box.set(MemberState::RS_ARBITER, m);
+ } else {
+ rs->box.noteRemoteIsPrimary(m);
+ }
}
/** called as the health threads get new results */
@@ -87,11 +96,14 @@ namespace mongo {
}
const Member *p2;
- try { p2 = findOtherPrimary(); }
- catch(string s) {
- /* two other nodes think they are primary (asynchronously polled) -- wait for things to settle down. */
- log() << "replSet warning DIAG 2 primary" << s << rsLog;
- return;
+ {
+ bool two;
+ p2 = findOtherPrimary(two);
+ if( two ) {
+ /* two other nodes think they are primary (asynchronously polled) -- wait for things to settle down. */
+ log() << "replSet warning DIAG two primaries (transiently)" << rsLog;
+ return;
+ }
}
if( p2 ) {
@@ -136,7 +148,7 @@ namespace mongo {
return;
}
- if( !rs->elect.aMajoritySeemsToBeUp() ) {
+ if( rs->elect.shouldRelinquish() ) {
log() << "replSet can't see a majority of the set, relinquishing primary" << rsLog;
rs->relinquish();
}
diff --git a/db/repl/replset_commands.cpp b/db/repl/replset_commands.cpp
index f8f46d5..328b0ab 100644
--- a/db/repl/replset_commands.cpp
+++ b/db/repl/replset_commands.cpp
@@ -34,19 +34,27 @@ namespace mongo {
*/
bool replSetBlind = false;
+ unsigned replSetForceInitialSyncFailure = 0;
class CmdReplSetTest : public ReplSetCommand {
public:
virtual void help( stringstream &help ) const {
- help << "Just for testing : do not use.\n";
+ help << "Just for regression tests.\n";
}
CmdReplSetTest() : ReplSetCommand("replSetTest") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ log() << "replSet replSetTest command received: " << cmdObj.toString() << rsLog;
+ if( cmdObj.hasElement("forceInitialSyncFailure") ) {
+ replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
+ return true;
+ }
+
+ // may not need this, but if removed check all tests still work:
if( !check(errmsg, result) )
return false;
+
if( cmdObj.hasElement("blind") ) {
replSetBlind = cmdObj.getBoolField("blind");
- log() << "replSet info replSetTest command received, replSetBlind=" << replSetBlind << rsLog;
return true;
}
return false;
@@ -55,6 +63,7 @@ namespace mongo {
class CmdReplSetGetRBID : public ReplSetCommand {
public:
+ /* todo: ideally this should only change on rollbacks NOT on mongod restarts also. fix... */
int rbid;
virtual void help( stringstream &help ) const {
help << "internal";
@@ -65,12 +74,15 @@ namespace mongo {
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
if( !check(errmsg, result) )
return false;
- result.append("rbid",rbid);
+ result.append("rbid",rbid);
return true;
}
} cmdReplSetRBID;
using namespace bson;
+ void incRBID() {
+ cmdReplSetRBID.rbid++;
+ }
int getRBID(DBClientConnection *c) {
bo info;
c->simpleCommand("admin", &info, "replSetGetRBID");
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index a6737be..1c0444a 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -31,39 +31,48 @@ namespace mongo {
extern string *discoveredSeed;
void ReplSetImpl::sethbmsg(string s, int logLevel) {
- static time_t lastLogged;
- if( s == _hbmsg ) {
- // unchanged
- if( time(0)-lastLogged < 60 )
- return;
- }
-
- unsigned sz = s.size();
- if( sz >= 256 )
- memcpy(_hbmsg, s.c_str(), 255);
- else {
- _hbmsg[sz] = 0;
- memcpy(_hbmsg, s.c_str(), sz);
- }
- if( !s.empty() ) {
- lastLogged = time(0);
- log(logLevel) << "replSet " << s << rsLog;
- }
+ static time_t lastLogged;
+ _hbmsgTime = time(0);
+
+ if( s == _hbmsg ) {
+ // unchanged
+ if( _hbmsgTime - lastLogged < 60 )
+ return;
+ }
+
+ unsigned sz = s.size();
+ if( sz >= 256 )
+ memcpy(_hbmsg, s.c_str(), 255);
+ else {
+ _hbmsg[sz] = 0;
+ memcpy(_hbmsg, s.c_str(), sz);
+ }
+ if( !s.empty() ) {
+ lastLogged = _hbmsgTime;
+ log(logLevel) << "replSet " << s << rsLog;
+ }
}
void ReplSetImpl::assumePrimary() {
assert( iAmPotentiallyHot() );
writelock lk("admin."); // so we are synchronized with _logOp()
box.setSelfPrimary(_self);
- log() << "replSet PRIMARY" << rsLog; // self (" << _self->id() << ") is now primary" << rsLog;
+ //log() << "replSet PRIMARY" << rsLog; // self (" << _self->id() << ") is now primary" << rsLog;
}
void ReplSetImpl::changeState(MemberState s) { box.change(s, _self); }
void ReplSetImpl::relinquish() {
if( box.getState().primary() ) {
+ log() << "replSet relinquishing primary state" << rsLog;
changeState(MemberState::RS_RECOVERING);
- log() << "replSet info relinquished primary state" << rsLog;
+
+ /* close sockets that were talking to us */
+ /*log() << "replSet closing sockets after reqlinquishing primary" << rsLog;
+ MessagingPort::closeAllSockets(1);*/
+
+ // todo: >
+ //changeState(MemberState::RS_SECONDARY);
}
else if( box.getState().startup2() ) {
// ? add comment
@@ -109,11 +118,18 @@ namespace mongo {
}
void ReplSetImpl::_fillIsMasterHost(const Member *m, vector<string>& hosts, vector<string>& passives, vector<string>& arbiters) {
+ if( m->config().hidden )
+ return;
+
if( m->potentiallyHot() ) {
hosts.push_back(m->h().toString());
}
else if( !m->config().arbiterOnly ) {
- passives.push_back(m->h().toString());
+ if( m->config().slaveDelay ) {
+ /* hmmm - we don't list these as they are stale. */
+ } else {
+ passives.push_back(m->h().toString());
+ }
}
else {
arbiters.push_back(m->h().toString());
@@ -152,6 +168,10 @@ namespace mongo {
}
if( myConfig().arbiterOnly )
b.append("arbiterOnly", true);
+ if( myConfig().slaveDelay )
+ b.append("slaveDelay", myConfig().slaveDelay);
+ if( myConfig().hidden )
+ b.append("hidden", true);
}
/** @param cfgString <setname>/<seedhost1>,<seedhost2> */
@@ -200,6 +220,7 @@ namespace mongo {
_self(0),
mgr( new Manager(this) )
{
+ _cfg = 0;
memset(_hbmsg, 0, sizeof(_hbmsg));
*_hbmsg = '.'; // temp...just to see
lastH = 0;
@@ -245,7 +266,7 @@ namespace mongo {
loadLastOpTimeWritten();
}
catch(std::exception& e) {
- log() << "replSet ERROR FATAL couldn't query the local " << rsoplog << " collection. Terminating mongod after 30 seconds." << rsLog;
+ log() << "replSet error fatal couldn't query the local " << rsoplog << " collection. Terminating mongod after 30 seconds." << rsLog;
log() << e.what() << rsLog;
sleepsecs(30);
dbexit( EXIT_REPLICATION_ERROR );
@@ -260,25 +281,64 @@ namespace mongo {
ReplSetImpl::StartupStatus ReplSetImpl::startupStatus = PRESTART;
string ReplSetImpl::startupStatusMsg;
- // true if ok; throws if config really bad; false if config doesn't include self
- bool ReplSetImpl::initFromConfig(ReplSetConfig& c) {
+ extern BSONObj *getLastErrorDefault;
+
+ /** @param reconf true if this is a reconfiguration and not an initial load of the configuration.
+ @return true if ok; throws if config really bad; false if config doesn't include self
+ */
+ bool ReplSetImpl::initFromConfig(ReplSetConfig& c, bool reconf) {
+ /* NOTE: haveNewConfig() writes the new config to disk before we get here. So
+ we cannot error out at this point, except fatally. Check errors earlier.
+ */
lock lk(this);
+ if( getLastErrorDefault || !c.getLastErrorDefaults.isEmpty() ) {
+ // see comment in dbcommands.cpp for getlasterrordefault
+ getLastErrorDefault = new BSONObj( c.getLastErrorDefaults );
+ }
+
+ list<const ReplSetConfig::MemberCfg*> newOnes;
+ bool additive = reconf;
{
+ unsigned nfound = 0;
int me = 0;
for( vector<ReplSetConfig::MemberCfg>::iterator i = c.members.begin(); i != c.members.end(); i++ ) {
const ReplSetConfig::MemberCfg& m = *i;
if( m.h.isSelf() ) {
+ nfound++;
me++;
+
+ if( !reconf || (_self && _self->id() == (unsigned) m._id) )
+ ;
+ else {
+ log() << "replSet " << _self->id() << ' ' << m._id << rsLog;
+ assert(false);
+ }
+ }
+ else if( reconf ) {
+ const Member *old = findById(m._id);
+ if( old ) {
+ nfound++;
+ assert( (int) old->id() == m._id );
+ if( old->config() == m ) {
+ additive = false;
+ }
+ }
+ else {
+ newOnes.push_back(&m);
+ }
}
}
if( me == 0 ) {
// log() << "replSet config : " << _cfg->toString() << rsLog;
- log() << "replSet warning can't find self in the repl set configuration:" << rsLog;
+ log() << "replSet error can't find self in the repl set configuration:" << rsLog;
log() << c.toString() << rsLog;
- return false;
+ assert(false);
}
uassert( 13302, "replSet error self appears twice in the repl set configuration", me<=1 );
+
+ if( reconf && config().members.size() != nfound )
+ additive = false;
}
_cfg = new ReplSetConfig(c);
@@ -287,6 +347,24 @@ namespace mongo {
_name = _cfg->_id;
assert( !_name.empty() );
+ if( additive ) {
+ log() << "replSet info : additive change to configuration" << rsLog;
+ for( list<const ReplSetConfig::MemberCfg*>::const_iterator i = newOnes.begin(); i != newOnes.end(); i++ ) {
+ const ReplSetConfig::MemberCfg* m = *i;
+ Member *mi = new Member(m->h, m->_id, m, false);
+
+ /** we will indicate that new members are up() initially so that we don't relinquish our
+ primary state because we can't (transiently) see a majority. they should be up as we
+ check that new members are up before getting here on reconfig anyway.
+ */
+ mi->get_hbinfo().health = 0.1;
+
+ _members.push(mi);
+ startHealthTaskFor(mi);
+ }
+ return true;
+ }
+
// start with no members. if this is a reconfig, drop the old ones.
_members.orphanAll();
@@ -416,6 +494,7 @@ namespace mongo {
startupStatusMsg = "replSet error loading set config (BADCONFIG)";
log() << "replSet error loading configurations " << e.toString() << rsLog;
log() << "replSet error replication will not start" << rsLog;
+ sethbmsg("error loading set config");
_fatal();
throw;
}
@@ -429,11 +508,10 @@ namespace mongo {
{
//lock l(this);
box.set(MemberState::RS_FATAL, 0);
- sethbmsg("fatal error");
- log() << "replSet error fatal error, stopping replication" << rsLog;
+ //sethbmsg("fatal error");
+ log() << "replSet error fatal, stopping replication" << rsLog;
}
-
void ReplSet::haveNewConfig(ReplSetConfig& newConfig, bool addComment) {
lock l(this); // convention is to lock replset before taking the db rwlock
writelock lk("");
@@ -442,7 +520,7 @@ namespace mongo {
comment = BSON( "msg" << "Reconfig set" << "version" << newConfig.version );
newConfig.saveConfigLocally(comment);
try {
- initFromConfig(newConfig);
+ initFromConfig(newConfig, true);
log() << "replSet replSetReconfig new config saved locally" << rsLog;
}
catch(DBException& e) {
diff --git a/db/repl/rs.h b/db/repl/rs.h
index 17a070c..6c4d9a8 100644
--- a/db/repl/rs.h
+++ b/db/repl/rs.h
@@ -44,19 +44,19 @@ namespace mongo {
public:
Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self);
string fullName() const { return h().toString(); }
- const ReplSetConfig::MemberCfg& config() const { return *_config; }
+ const ReplSetConfig::MemberCfg& config() const { return _config; }
const HeartbeatInfo& hbinfo() const { return _hbinfo; }
- string lhb() { return _hbinfo.lastHeartbeatMsg; }
+ HeartbeatInfo& get_hbinfo() { return _hbinfo; }
+ string lhb() const { return _hbinfo.lastHeartbeatMsg; }
MemberState state() const { return _hbinfo.hbstate; }
const HostAndPort& h() const { return _h; }
unsigned id() const { return _hbinfo.id(); }
- bool potentiallyHot() const { return _config->potentiallyHot(); } // not arbiter, not priority 0
-
+ bool potentiallyHot() const { return _config.potentiallyHot(); } // not arbiter, not priority 0
void summarizeMember(stringstream& s) const;
friend class ReplSetImpl;
private:
- const ReplSetConfig::MemberCfg *_config; /* todo: when this changes??? */
- HostAndPort _h;
+ const ReplSetConfig::MemberCfg _config;
+ const HostAndPort _h;
HeartbeatInfo _hbinfo;
};
@@ -64,7 +64,13 @@ namespace mongo {
ReplSetImpl *rs;
bool busyWithElectSelf;
int _primary;
- const Member* findOtherPrimary();
+
+ /** @param two - if true two primaries were seen. this can happen transiently, in addition to our
+ polling being only occasional. in this case null is returned, but the caller should
+ not assume primary itself in that situation.
+ */
+ const Member* findOtherPrimary(bool& two);
+
void noteARemoteIsPrimary(const Member *);
virtual void starting();
public:
@@ -102,6 +108,7 @@ namespace mongo {
int totalVotes() const;
bool aMajoritySeemsToBeUp() const;
+ bool shouldRelinquish() const;
void electSelf();
void electCmdReceived(BSONObj, BSONObjBuilder*);
void multiCommand(BSONObj cmd, list<Target>& L);
@@ -119,8 +126,8 @@ namespace mongo {
protected:
RSBase() : magic(0x12345677), m("RSBase"), _locked(0) { }
~RSBase() {
- log() << "~RSBase should never be called?" << rsLog;
- assert(false);
+ /* this can happen if we throw in the constructor; otherwise never happens. thus we log it as it is quite unusual. */
+ log() << "replSet ~RSBase called" << rsLog;
}
class lock {
@@ -175,6 +182,9 @@ namespace mongo {
const Member* getPrimary() const { return sp.primary; }
void change(MemberState s, const Member *self) {
scoped_lock lk(m);
+ if( sp.state != s ) {
+ log() << "replSet " << s.toString() << rsLog;
+ }
sp.state = s;
if( s.primary() ) {
sp.primary = self;
@@ -194,6 +204,12 @@ namespace mongo {
assert( !sp.state.primary() );
sp.primary = mem;
}
+ void noteRemoteIsPrimary(const Member *remote) {
+ scoped_lock lk(m);
+ if( !sp.state.secondary() && !sp.state.fatal() )
+ sp.state = MemberState::RS_RECOVERING;
+ sp.primary = remote;
+ }
StateBox() : m("StateBox") { }
private:
mutex m;
@@ -226,7 +242,6 @@ namespace mongo {
};
static StartupStatus startupStatus;
static string startupStatusMsg;
- static string stateAsStr(MemberState state);
static string stateAsHtml(MemberState state);
/* todo thread */
@@ -241,28 +256,24 @@ namespace mongo {
void endOldHealthTasks();
void startHealthTaskFor(Member *m);
- private:
Consensus elect;
- bool ok() const { return !box.getState().fatal(); }
-
void relinquish();
void forgetPrimary();
-
protected:
bool _stepDown();
private:
void assumePrimary();
void loadLastOpTimeWritten();
void changeState(MemberState s);
-
protected:
// "heartbeat message"
// sent in requestHeartbeat respond in field "hbm"
char _hbmsg[256]; // we change this unlocked, thus not an stl::string
+ time_t _hbmsgTime; // when it was logged
public:
void sethbmsg(string s, int logLevel = 0);
protected:
- bool initFromConfig(ReplSetConfig& c); // true if ok; throws if config really bad; false if config doesn't include self
+ bool initFromConfig(ReplSetConfig& c, bool reconf=false); // true if ok; throws if config really bad; false if config doesn't include self
void _fillIsMaster(BSONObjBuilder&);
void _fillIsMasterHost(const Member*, vector<string>&, vector<string>&, vector<string>&);
const ReplSetConfig& config() { return *_cfg; }
@@ -323,8 +334,10 @@ namespace mongo {
void _syncDoInitialSync();
void syncDoInitialSync();
void _syncThread();
+ bool tryToGoLiveAsASecondary(OpTime&); // readlocks
void syncTail();
void syncApply(const BSONObj &o);
+ unsigned _syncRollback(OplogReader& r);
void syncRollback(OplogReader& r);
void syncFixUp(HowToFixUp& h, OplogReader& r);
public:
@@ -344,9 +357,10 @@ namespace mongo {
/* call after constructing to start - returns fairly quickly after la[unching its threads */
void go() { _go(); }
+
void fatal() { _fatal(); }
- bool isPrimary();
- bool isSecondary();
+ bool isPrimary() { return box.getState().primary(); }
+ bool isSecondary() { return box.getState().secondary(); }
MemberState state() const { return ReplSetImpl::state(); }
string name() const { return ReplSetImpl::name(); }
const ReplSetConfig& config() { return ReplSetImpl::config(); }
@@ -366,7 +380,10 @@ namespace mongo {
bool lockedByMe() { return RSBase::lockedByMe(); }
// heartbeat msg to send to others; descriptive diagnostic info
- string hbmsg() const { return _hbmsg; }
+ string hbmsg() const {
+ if( time(0)-_hbmsgTime > 120 ) return "";
+ return _hbmsg;
+ }
};
/** base class for repl set commands. checks basic things such as in rs mode before the command
@@ -388,6 +405,8 @@ namespace mongo {
if( theReplSet == 0 ) {
result.append("startupStatus", ReplSet::startupStatus);
errmsg = ReplSet::startupStatusMsg.empty() ? "replset unknown error 2" : ReplSet::startupStatusMsg;
+ if( ReplSet::startupStatus == 3 )
+ result.append("info", "run rs.initiate(...) if not yet done for the set");
return false;
}
return true;
@@ -397,19 +416,10 @@ namespace mongo {
/** inlines ----------------- */
inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self) :
- _config(c), _h(h), _hbinfo(ord) {
- if( self ) {
- _hbinfo.health = 1.0;
- }
- }
-
- inline bool ReplSet::isPrimary() {
- /* todo replset */
- return box.getState().primary();
- }
-
- inline bool ReplSet::isSecondary() {
- return box.getState().secondary();
+ _config(*c), _h(h), _hbinfo(ord)
+ {
+ if( self )
+ _hbinfo.health = 1.0;
}
}
diff --git a/db/repl/rs_config.cpp b/db/repl/rs_config.cpp
index 76b20a4..85c9a46 100644
--- a/db/repl/rs_config.cpp
+++ b/db/repl/rs_config.cpp
@@ -31,6 +31,16 @@ namespace mongo {
void logOpInitiate(const bo&);
+ void assertOnlyHas(BSONObj o, const set<string>& fields) {
+ BSONObj::iterator i(o);
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if( !fields.count( e.fieldName() ) ) {
+ uasserted(13434, str::stream() << "unexpected field '" << e.fieldName() << "'in object");
+ }
+ }
+ }
+
list<HostAndPort> ReplSetConfig::otherMemberHostnames() const {
list<HostAndPort> L;
for( vector<MemberCfg>::const_iterator i = members.begin(); i != members.end(); i++ ) {
@@ -42,7 +52,7 @@ namespace mongo {
/* comment MUST only be set when initiating the set by the initiator */
void ReplSetConfig::saveConfigLocally(bo comment) {
- check();
+ checkRsConfig();
log() << "replSet info saving a newer config version to local.system.replset" << rsLog;
{
writelock lk("");
@@ -81,6 +91,8 @@ namespace mongo {
if( votes != 1 ) b << "votes" << votes;
if( priority != 1.0 ) b << "priority" << priority;
if( arbiterOnly ) b << "arbiterOnly" << true;
+ if( slaveDelay ) b << "slaveDelay" << slaveDelay;
+ if( hidden ) b << "hidden" << hidden;
return b.obj();
}
@@ -115,9 +127,17 @@ namespace mongo {
mchk(priority >= 0 && priority <= 1000);
mchk(votes >= 0 && votes <= 100);
uassert(13419, "this version of mongod only supports priorities 0 and 1", priority == 0 || priority == 1);
+ uassert(13437, "slaveDelay requires priority be zero", slaveDelay == 0 || priority == 0);
+ uassert(13438, "bad slaveDelay value", slaveDelay >= 0 && slaveDelay <= 3600 * 24 * 366);
+ uassert(13439, "priority must be 0 when hidden=true", priority == 0 || !hidden);
}
+ /** @param o old config
+ @param n new config
+ */
/*static*/ bool ReplSetConfig::legalChange(const ReplSetConfig& o, const ReplSetConfig& n, string& errmsg) {
+ assert( theReplSet );
+
if( o._id != n._id ) {
errmsg = "set name may not change";
return false;
@@ -131,6 +151,25 @@ namespace mongo {
return false;
}
+ map<HostAndPort,const ReplSetConfig::MemberCfg*> old;
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = o.members.begin(); i != o.members.end(); i++ ) {
+ old[i->h] = &(*i);
+ }
+ int me = 0;
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = n.members.begin(); i != n.members.end(); i++ ) {
+ const ReplSetConfig::MemberCfg& m = *i;
+ if( old.count(m.h) ) {
+ if( old[m.h]->_id != m._id ) {
+ log() << "replSet reconfig error with member: " << m.h.toString() << rsLog;
+ uasserted(13432, "_id may not change for members");
+ }
+ }
+ if( m.h.isSelf() )
+ me++;
+ }
+
+ uassert(13433, "can't find self in new replset config", me == 1);
+
/* TODO : MORE CHECKS HERE */
log() << "replSet TODO : don't allow removal of a node until we handle it at the removed node end?" << endl;
@@ -144,7 +183,7 @@ namespace mongo {
_ok = false;
}
- void ReplSetConfig::check() const {
+ void ReplSetConfig::checkRsConfig() const {
uassert(13132,
"nonmatching repl set name in _id field; check --replSet command line",
_id == cmdLine.ourSetName());
@@ -154,6 +193,10 @@ namespace mongo {
}
void ReplSetConfig::from(BSONObj o) {
+ static const string legal[] = {"_id","version", "members","settings"};
+ static const set<string> legals(legal, legal + 4);
+ assertOnlyHas(o, legals);
+
md5 = o.md5();
_id = o["_id"].String();
if( o["version"].ok() ) {
@@ -170,7 +213,7 @@ namespace mongo {
if( settings["heartbeatTimeout"].ok() )
ho.heartbeatTimeoutMillis = (unsigned) (settings["heartbeatTimeout"].Number() * 1000);
ho.check();
- try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj(); } catch(...) { }
+ try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); } catch(...) { }
}
set<string> hosts;
@@ -188,6 +231,10 @@ namespace mongo {
BSONObj mobj = members[i].Obj();
MemberCfg m;
try {
+ static const string legal[] = {"_id","votes","priority","host","hidden","slaveDelay","arbiterOnly"};
+ static const set<string> legals(legal, legal + 7);
+ assertOnlyHas(mobj, legals);
+
try {
m._id = (int) mobj["_id"].Number();
} catch(...) {
@@ -205,6 +252,9 @@ namespace mongo {
if( m.h.isLocalHost() )
localhosts++;
m.arbiterOnly = mobj.getBoolField("arbiterOnly");
+ m.slaveDelay = mobj["slaveDelay"].numberInt();
+ if( mobj.hasElement("hidden") )
+ m.hidden = mobj.getBoolField("hidden");
if( mobj.hasElement("priority") )
m.priority = mobj["priority"].Number();
if( mobj.hasElement("votes") )
@@ -308,6 +358,7 @@ namespace mongo {
BSONObj o = c->nextSafe();
uassert(13109, "multiple rows in " + rsConfigNs + " not supported", !c->more());
from(o);
+ checkRsConfig();
_ok = true;
log(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
}
diff --git a/db/repl/rs_config.h b/db/repl/rs_config.h
index 38df772..e39dad7 100644
--- a/db/repl/rs_config.h
+++ b/db/repl/rs_config.h
@@ -41,18 +41,27 @@ namespace mongo {
bool ok() const { return _ok; }
struct MemberCfg {
- MemberCfg() : _id(-1), votes(1), priority(1.0), arbiterOnly(false) { }
+ MemberCfg() : _id(-1), votes(1), priority(1.0), arbiterOnly(false), slaveDelay(0), hidden(false) { }
int _id; /* ordinal */
unsigned votes; /* how many votes this node gets. default 1. */
HostAndPort h;
double priority; /* 0 means can never be primary */
bool arbiterOnly;
+ int slaveDelay; /* seconds. int rather than unsigned for convenient to/from bson conversion. */
+ bool hidden; /* if set, don't advertise to drivers in isMaster. for non-primaries (priority 0) */
+
void check() const; /* check validity, assert if not. */
BSONObj asBson() const;
bool potentiallyHot() const {
return !arbiterOnly && priority > 0;
}
+ bool operator==(const MemberCfg& r) const {
+ return _id==r._id && votes == r.votes && h == r.h && priority == r.priority &&
+ arbiterOnly == r.arbiterOnly && slaveDelay == r.slaveDelay && hidden == r.hidden;
+ }
+ bool operator!=(const MemberCfg& r) const { return !(*this == r); }
};
+
vector<MemberCfg> members;
string _id;
int version;
@@ -68,7 +77,7 @@ namespace mongo {
string toString() const { return asBson().toString(); }
/** validate the settings. does not call check() on each member, you have to do that separately. */
- void check() const;
+ void checkRsConfig() const;
/** check if modification makes sense */
static bool legalChange(const ReplSetConfig& old, const ReplSetConfig& n, string& errmsg);
diff --git a/db/repl/rs_initialsync.cpp b/db/repl/rs_initialsync.cpp
index 4c6bd4d..3851c66 100644
--- a/db/repl/rs_initialsync.cpp
+++ b/db/repl/rs_initialsync.cpp
@@ -66,7 +66,7 @@ namespace mongo {
void _logOpObjRS(const BSONObj& op);
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string errmsg);
+ bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string &errmsg, bool logforrepl);
static void emptyOplog() {
writelock lk(rsoplog);
diff --git a/db/repl/rs_member.h b/db/repl/rs_member.h
index 4f6846a..6a797b5 100644
--- a/db/repl/rs_member.h
+++ b/db/repl/rs_member.h
@@ -40,7 +40,8 @@ namespace mongo {
RS_STARTUP2,
RS_UNKNOWN, /* remote node not yet reached */
RS_ARBITER,
- RS_DOWN /* node not reachable for a report */
+ RS_DOWN, /* node not reachable for a report */
+ RS_ROLLBACK
} s;
MemberState(MS ms = RS_UNKNOWN) : s(ms) { }
@@ -51,6 +52,9 @@ namespace mongo {
bool recovering() const { return s == RS_RECOVERING; }
bool startup2() const { return s == RS_STARTUP2; }
bool fatal() const { return s == RS_FATAL; }
+ bool rollback() const { return s == RS_ROLLBACK; }
+
+ string toString() const;
bool operator==(const MemberState& r) const { return s == r.s; }
bool operator!=(const MemberState& r) const { return s != r.s; }
@@ -61,26 +65,31 @@ namespace mongo {
class HeartbeatInfo {
unsigned _id;
public:
- HeartbeatInfo() : _id(0xffffffff),skew(INT_MIN) { }
+ HeartbeatInfo() : _id(0xffffffff),hbstate(MemberState::RS_UNKNOWN),health(-1.0),downSince(0),skew(INT_MIN) { }
HeartbeatInfo(unsigned id);
bool up() const { return health > 0; }
unsigned id() const { return _id; }
MemberState hbstate;
double health;
time_t upSince;
+ long long downSince;
time_t lastHeartbeat;
string lastHeartbeatMsg;
OpTime opTime;
int skew;
+ long long timeDown() const; // ms
+
/* true if changed in a way of interest to the repl set manager. */
bool changed(const HeartbeatInfo& old) const;
};
inline HeartbeatInfo::HeartbeatInfo(unsigned id) : _id(id) {
- health = -1.0;
- lastHeartbeat = upSince = 0;
- skew = INT_MIN;
+ hbstate = MemberState::RS_UNKNOWN;
+ health = -1.0;
+ downSince = 0;
+ lastHeartbeat = upSince = 0;
+ skew = INT_MIN;
}
inline bool HeartbeatInfo::changed(const HeartbeatInfo& old) const {
diff --git a/db/repl/rs_rollback.cpp b/db/repl/rs_rollback.cpp
index 1bb7217..6b2544c 100644
--- a/db/repl/rs_rollback.cpp
+++ b/db/repl/rs_rollback.cpp
@@ -62,6 +62,14 @@ namespace mongo {
using namespace bson;
+ bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logforrepl);
+ void incRBID();
+
+ class rsfatal : public std::exception {
+ public:
+ virtual const char* what() const throw(){ return "replica set fatal exception"; }
+ };
+
struct DocID {
const char *ns;
be _id;
@@ -81,6 +89,8 @@ namespace mongo {
/* collections to drop */
set<string> toDrop;
+ set<string> collectionsToResync;
+
OpTime commonPoint;
DiskLoc commonPointOurDiskloc;
@@ -113,17 +123,59 @@ namespace mongo {
if( *op == 'c' ) {
be first = o.firstElement();
NamespaceString s(d.ns); // foo.$cmd
-
- if( string("create") == first.fieldName() ) {
- /* Create collection operation
- { ts: ..., h: ..., op: "c", ns: "foo.$cmd", o: { create: "abc", ... } }
- */
- string ns = s.db + '.' + o["create"].String(); // -> foo.abc
- h.toDrop.insert(ns);
+ string cmdname = first.fieldName();
+ Command *cmd = Command::findCommand(cmdname.c_str());
+ if( cmd == 0 ) {
+ log() << "replSet warning rollback no suchcommand " << first.fieldName() << " - different mongod versions perhaps?" << rsLog;
return;
}
- else {
- log() << "replSet WARNING can't roll back this command yet: " << o.toString() << rsLog;
+ else {
+ /* findandmodify - translated?
+ godinsert?,
+ renameCollection a->b. just resync a & b
+ */
+ if( cmdname == "create" ) {
+ /* Create collection operation
+ { ts: ..., h: ..., op: "c", ns: "foo.$cmd", o: { create: "abc", ... } }
+ */
+ string ns = s.db + '.' + o["create"].String(); // -> foo.abc
+ h.toDrop.insert(ns);
+ return;
+ }
+ else if( cmdname == "drop" ) {
+ string ns = s.db + '.' + first.valuestr();
+ h.collectionsToResync.insert(ns);
+ return;
+ }
+ else if( cmdname == "dropIndexes" || cmdname == "deleteIndexes" ) {
+ /* TODO: this is bad. we simply full resync the collection here, which could be very slow. */
+ log() << "replSet info rollback of dropIndexes is slow in this version of mongod" << rsLog;
+ string ns = s.db + '.' + first.valuestr();
+ h.collectionsToResync.insert(ns);
+ return;
+ }
+ else if( cmdname == "renameCollection" ) {
+ /* TODO: slow. */
+ log() << "replSet info rollback of renameCollection is slow in this version of mongod" << rsLog;
+ string from = first.valuestr();
+ string to = o["to"].String();
+ h.collectionsToResync.insert(from);
+ h.collectionsToResync.insert(to);
+ return;
+ }
+ else if( cmdname == "reIndex" ) {
+ return;
+ }
+ else if( cmdname == "dropDatabase" ) {
+ log() << "replSet error rollback : can't rollback drop database full resync will be required" << rsLog;
+ log() << "replSet " << o.toString() << rsLog;
+ throw rsfatal();
+ }
+ else {
+ log() << "replSet error can't rollback this command yet: " << o.toString() << rsLog;
+ log() << "replSet cmdname=" << cmdname << rsLog;
+ throw rsfatal();
+ }
}
}
@@ -141,8 +193,6 @@ namespace mongo {
static void syncRollbackFindCommonPoint(DBClientConnection *them, HowToFixUp& h) {
static time_t last;
if( time(0)-last < 60 ) {
- // this could put a lot of load on someone else, don't repeat too often
- sleepsecs(10);
throw "findcommonpoint waiting a while before trying again";
}
last = time(0);
@@ -170,12 +220,14 @@ namespace mongo {
BSONObj theirObj = t->nextSafe();
OpTime theirTime = theirObj["ts"]._opTime();
- if( 1 ) {
+ {
long long diff = (long long) ourTime.getSecs() - ((long long) theirTime.getSecs());
/* diff could be positive, negative, or zero */
- log() << "replSet info syncRollback diff in end of log times : " << diff << " seconds" << rsLog;
+ log() << "replSet info rollback our last optime: " << ourTime.toStringPretty() << rsLog;
+ log() << "replSet info rollback their last optime: " << theirTime.toStringPretty() << rsLog;
+ log() << "replSet info rollback diff in end of log times: " << diff << " seconds" << rsLog;
if( diff > 3600 ) {
- log() << "replSet syncRollback too long a time period for a rollback." << rsLog;
+ log() << "replSet rollback too long a time period for a rollback." << rsLog;
throw "error not willing to roll back more than one hour of data";
}
}
@@ -197,16 +249,35 @@ namespace mongo {
refetch(h, ourObj);
+ if( !t->more() ) {
+ log() << "replSet rollback error RS100 reached beginning of remote oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS100 reached beginning of remote oplog [2]";
+ }
theirObj = t->nextSafe();
theirTime = theirObj["ts"]._opTime();
u.advance();
- if( !u.ok() ) throw "reached beginning of local oplog";
+ if( !u.ok() ) {
+ log() << "replSet rollback error RS101 reached beginning of local oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS101 reached beginning of local oplog [1]";
+ }
ourObj = u.current();
ourTime = ourObj["ts"]._opTime();
}
else if( theirTime > ourTime ) {
- /* todo: we could hit beginning of log here. exception thrown is ok but not descriptive, so fix up */
+ if( !t->more() ) {
+ log() << "replSet rollback error RS100 reached beginning of remote oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS100 reached beginning of remote oplog [1]";
+ }
theirObj = t->nextSafe();
theirTime = theirObj["ts"]._opTime();
}
@@ -214,7 +285,13 @@ namespace mongo {
// theirTime < ourTime
refetch(h, ourObj);
u.advance();
- if( !u.ok() ) throw "reached beginning of local oplog";
+ if( !u.ok() ) {
+ log() << "replSet rollback error RS101 reached beginning of local oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS101 reached beginning of local oplog [2]";
+ }
ourObj = u.current();
ourTime = ourObj["ts"]._opTime();
}
@@ -226,7 +303,19 @@ namespace mongo {
bson::bo goodVersionOfObject;
};
- void ReplSetImpl::syncFixUp(HowToFixUp& h, OplogReader& r) {
+ static void setMinValid(bo newMinValid) {
+ try {
+ log() << "replSet minvalid=" << newMinValid["ts"]._opTime().toStringLong() << rsLog;
+ }
+ catch(...) { }
+ {
+ Helpers::putSingleton("local.replset.minvalid", newMinValid);
+ Client::Context cx( "local." );
+ cx.db()->flushFiles(true);
+ }
+ }
+
+ void ReplSetImpl::syncFixUp(HowToFixUp& h, OplogReader& r) {
DBClientConnection *them = r.conn();
// fetch all first so we needn't handle interruption in a fancy way
@@ -237,6 +326,7 @@ namespace mongo {
bo newMinValid;
+ /* fetch all the goodVersions of each document from current primary */
DocID d;
unsigned long long n = 0;
try {
@@ -258,43 +348,98 @@ namespace mongo {
}
newMinValid = r.getLastOp(rsoplog);
if( newMinValid.isEmpty() ) {
- sethbmsg("syncRollback error newMinValid empty?");
+ sethbmsg("rollback error newMinValid empty?");
return;
}
}
catch(DBException& e) {
- sethbmsg(str::stream() << "syncRollback re-get objects: " << e.toString(),0);
- log() << "syncRollback couldn't re-get ns:" << d.ns << " _id:" << d._id << ' ' << n << '/' << h.toRefetch.size() << rsLog;
+ sethbmsg(str::stream() << "rollback re-get objects: " << e.toString(),0);
+ log() << "rollback couldn't re-get ns:" << d.ns << " _id:" << d._id << ' ' << n << '/' << h.toRefetch.size() << rsLog;
throw e;
}
- sethbmsg("syncRollback 3.5");
+ MemoryMappedFile::flushAll(true);
+
+ sethbmsg("rollback 3.5");
if( h.rbid != getRBID(r.conn()) ) {
// our source rolled back itself. so the data we received isn't necessarily consistent.
- sethbmsg("syncRollback rbid on source changed during rollback, cancelling this attempt");
+ sethbmsg("rollback rbid on source changed during rollback, cancelling this attempt");
return;
}
// update them
- sethbmsg(str::stream() << "syncRollback 4 n:" << goodVersions.size());
+ sethbmsg(str::stream() << "rollback 4 n:" << goodVersions.size());
bool warn = false;
assert( !h.commonPointOurDiskloc.isNull() );
- MemoryMappedFile::flushAll(true);
-
dbMutex.assertWriteLocked();
/* we have items we are writing that aren't from a point-in-time. thus best not to come online
until we get to that point in freshness. */
- try {
- log() << "replSet set minvalid=" << newMinValid["ts"]._opTime().toString() << rsLog;
+ setMinValid(newMinValid);
+
+ /** any full collection resyncs required? */
+ if( !h.collectionsToResync.empty() ) {
+ for( set<string>::iterator i = h.collectionsToResync.begin(); i != h.collectionsToResync.end(); i++ ) {
+ string ns = *i;
+ sethbmsg(str::stream() << "rollback 4.1 coll resync " << ns);
+ Client::Context c(*i, dbpath, 0, /*doauth*/false);
+ try {
+ bob res;
+ string errmsg;
+ dropCollection(ns, errmsg, res);
+ {
+ dbtemprelease r;
+ bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, bo(), errmsg, false);
+ if( !ok ) {
+ log() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg << rsLog;
+ throw "rollback error resyncing rollection [1]";
+ }
+ }
+ }
+ catch(...) {
+ log() << "replset rollback error resyncing collection " << ns << rsLog;
+ throw "rollback error resyncing rollection [2]";
+ }
+ }
+
+ /* we did more reading from primary, so check it again for a rollback (which would mess us up), and
+ make minValid newer.
+ */
+ sethbmsg("rollback 4.2");
+ {
+ string err;
+ try {
+ newMinValid = r.getLastOp(rsoplog);
+ if( newMinValid.isEmpty() ) {
+ err = "can't get minvalid from primary";
+ } else {
+ setMinValid(newMinValid);
+ }
+ }
+ catch(...) {
+ err = "can't get/set minvalid";
+ }
+ if( h.rbid != getRBID(r.conn()) ) {
+ // our source rolled back itself. so the data we received isn't necessarily consistent.
+ // however, we've now done writes. thus we have a problem.
+ err += "rbid at primary changed during resync/rollback";
+ }
+ if( !err.empty() ) {
+ log() << "replSet error rolling back : " << err << ". A full resync will be necessary." << rsLog;
+ /* todo: reset minvalid so that we are permanently in fatal state */
+ /* todo: don't be fatal, but rather, get all the data first. */
+ sethbmsg("rollback error");
+ throw rsfatal();
+ }
+ }
+ sethbmsg("rollback 4.3");
}
- catch(...){}
- Helpers::putSingleton("local.replset.minvalid", newMinValid);
- /** first drop collections to drop - that might make things faster below actually if there were subsequent inserts */
+ sethbmsg("rollback 4.6");
+ /** drop collections to drop before doing individual fixups - that might make things faster below actually if there were subsequent inserts to rollback */
for( set<string>::iterator i = h.toDrop.begin(); i != h.toDrop.end(); i++ ) {
Client::Context c(*i, dbpath, 0, /*doauth*/false);
try {
@@ -308,6 +453,7 @@ namespace mongo {
}
}
+ sethbmsg("rollback 4.7");
Client::Context c(rsoplog, dbpath, 0, /*doauth*/false);
NamespaceDetails *oplogDetails = nsdetails(rsoplog);
uassert(13423, str::stream() << "replSet error in rollback can't find " << rsoplog, oplogDetails);
@@ -320,7 +466,12 @@ namespace mongo {
bo pattern = d._id.wrap(); // { _id : ... }
try {
assert( d.ns && *d.ns );
-
+ if( h.collectionsToResync.count(d.ns) ) {
+ /* we just synced this entire collection */
+ continue;
+ }
+
+ /* keep an archive of items rolled back */
shared_ptr<RemoveSaver>& rs = removeSavers[d.ns];
if ( ! rs )
rs.reset( new RemoveSaver( "rollback" , "" , d.ns ) );
@@ -343,7 +494,7 @@ namespace mongo {
long long start = Listener::getElapsedTimeMillis();
DiskLoc loc = Helpers::findOne(d.ns, pattern, false);
if( Listener::getElapsedTimeMillis() - start > 200 )
- log() << "replSet warning roll back slow no _id index for " << d.ns << rsLog;
+ log() << "replSet warning roll back slow no _id index for " << d.ns << " perhaps?" << rsLog;
//would be faster but requires index: DiskLoc loc = Helpers::findById(nsd, pattern);
if( !loc.isNull() ) {
try {
@@ -411,9 +562,9 @@ namespace mongo {
removeSavers.clear(); // this effectively closes all of them
- sethbmsg(str::stream() << "syncRollback 5 d:" << deletes << " u:" << updates);
+ sethbmsg(str::stream() << "rollback 5 d:" << deletes << " u:" << updates);
MemoryMappedFile::flushAll(true);
- sethbmsg("syncRollback 6");
+ sethbmsg("rollback 6");
// clean up oplog
log(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
@@ -423,59 +574,99 @@ namespace mongo {
/* reset cached lastoptimewritten and h value */
loadLastOpTimeWritten();
- sethbmsg("syncRollback 7");
+ sethbmsg("rollback 7");
MemoryMappedFile::flushAll(true);
// done
if( warn )
sethbmsg("issues during syncRollback, see log");
else
- sethbmsg("syncRollback done");
+ sethbmsg("rollback done");
}
void ReplSetImpl::syncRollback(OplogReader&r) {
+ unsigned s = _syncRollback(r);
+ if( s )
+ sleepsecs(s);
+ }
+
+ unsigned ReplSetImpl::_syncRollback(OplogReader&r) {
assert( !lockedByMe() );
assert( !dbMutex.atLeastReadLocked() );
- sethbmsg("syncRollback 0");
+ sethbmsg("rollback 0");
writelocktry lk(rsoplog, 20000);
if( !lk.got() ) {
- sethbmsg("syncRollback couldn't get write lock in a reasonable time");
- sleepsecs(2);
- return;
+ sethbmsg("rollback couldn't get write lock in a reasonable time");
+ return 2;
+ }
+
+ if( box.getState().secondary() ) {
+ /* by doing this, we will not service reads (return an error as we aren't in secondary staate.
+ that perhaps is moot becasue of the write lock above, but that write lock probably gets deferred
+ or removed or yielded later anyway.
+
+ also, this is better for status reporting - we know what is happening.
+ */
+ box.change(MemberState::RS_ROLLBACK, _self);
}
HowToFixUp how;
- sethbmsg("syncRollback 1");
+ sethbmsg("rollback 1");
{
r.resetCursor();
/*DBClientConnection us(false, 0, 0);
string errmsg;
if( !us.connect(HostAndPort::me().toString(),errmsg) ) {
- sethbmsg("syncRollback connect to self failure" + errmsg);
+ sethbmsg("rollback connect to self failure" + errmsg);
return;
}*/
- sethbmsg("syncRollback 2 FindCommonPoint");
+ sethbmsg("rollback 2 FindCommonPoint");
try {
syncRollbackFindCommonPoint(r.conn(), how);
}
catch( const char *p ) {
- sethbmsg(string("syncRollback 2 error ") + p);
- sleepsecs(10);
- return;
+ sethbmsg(string("rollback 2 error ") + p);
+ return 10;
+ }
+ catch( rsfatal& ) {
+ _fatal();
+ return 2;
}
catch( DBException& e ) {
- sethbmsg(string("syncRollback 2 exception ") + e.toString() + "; sleeping 1 min");
+ sethbmsg(string("rollback 2 exception ") + e.toString() + "; sleeping 1 min");
+ dbtemprelease r;
sleepsecs(60);
throw;
}
}
- sethbmsg("replSet syncRollback 3 fixup");
+ sethbmsg("replSet rollback 3 fixup");
+
+ {
+ incRBID();
+ try {
+ syncFixUp(how, r);
+ }
+ catch( rsfatal& ) {
+ sethbmsg("rollback fixup error");
+ _fatal();
+ return 2;
+ }
+ catch(...) {
+ incRBID(); throw;
+ }
+ incRBID();
+
+ /* success - leave "ROLLBACK" state
+ can go to SECONDARY once minvalid is achieved
+ */
+ box.change(MemberState::RS_RECOVERING, _self);
+ }
- syncFixUp(how, r);
+ return 0;
}
}
diff --git a/db/repl/rs_sync.cpp b/db/repl/rs_sync.cpp
index bece96c..9ea65cf 100644
--- a/db/repl/rs_sync.cpp
+++ b/db/repl/rs_sync.cpp
@@ -24,8 +24,11 @@ namespace mongo {
using namespace bson;
+ extern unsigned replSetForceInitialSyncFailure;
+
void startSyncThread() {
Client::initThread("rs_sync");
+ cc().iAmSyncThread();
theReplSet->syncThread();
cc().shutdown();
}
@@ -91,6 +94,7 @@ namespace mongo {
// todo : use exhaust
unsigned long long n = 0;
while( 1 ) {
+
if( !r.more() )
break;
BSONObj o = r.nextSafe(); /* note we might get "not master" at some point */
@@ -103,9 +107,14 @@ namespace mongo {
anymore. assumePrimary is in the db lock so we are safe as long as
we check after we locked above. */
const Member *p1 = box.getPrimary();
- if( p1 != primary ) {
- log() << "replSet primary was:" << primary->fullName() << " now:" <<
- (p1 != 0 ? p1->fullName() : "none") << rsLog;
+ if( p1 != primary || replSetForceInitialSyncFailure ) {
+ int f = replSetForceInitialSyncFailure;
+ if( f > 0 ) {
+ replSetForceInitialSyncFailure = f-1;
+ log() << "replSet test code invoked, replSetForceInitialSyncFailure" << rsLog;
+ }
+ log() << "replSet primary was:" << primary->fullName() << " now:" <<
+ (p1 != 0 ? p1->fullName() : "none") << rsLog;
throw DBException("primary changed",0);
}
@@ -131,6 +140,32 @@ namespace mongo {
return true;
}
+ /* should be in RECOVERING state on arrival here.
+ readlocks
+ @return true if transitioned to SECONDARY
+ */
+ bool ReplSetImpl::tryToGoLiveAsASecondary(OpTime& /*out*/ minvalid) {
+ bool golive = false;
+ {
+ readlock lk("local.replset.minvalid");
+ BSONObj mv;
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ minvalid = mv["ts"]._opTime();
+ if( minvalid <= lastOpTimeWritten ) {
+ golive=true;
+ }
+ }
+ else
+ golive = true; /* must have been the original member */
+ }
+ if( golive ) {
+ sethbmsg("");
+ changeState(MemberState::RS_SECONDARY);
+ }
+ return golive;
+ }
+
+ /* tail the primary's oplog. ok to return, will be re-called. */
void ReplSetImpl::syncTail() {
// todo : locking vis a vis the mgr...
@@ -147,14 +182,19 @@ namespace mongo {
{
BSONObj remoteOldestOp = r.findOne(rsoplog, Query());
OpTime ts = remoteOldestOp["ts"]._opTime();
- DEV log() << "remoteOldestOp: " << ts.toStringPretty() << endl;
- else log(3) << "remoteOldestOp: " << ts.toStringPretty() << endl;
+ DEV log() << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
+ else log(3) << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
+ DEV {
+ // debugging sync1.js...
+ log() << "replSet lastOpTimeWritten: " << lastOpTimeWritten.toStringLong() << rsLog;
+ log() << "replSet our state: " << state().toString() << rsLog;
+ }
if( lastOpTimeWritten < ts ) {
- log() << "replSet error too stale to catch up, at least from primary " << hn << rsLog;
- log() << "replSet our last optime : " << lastOpTimeWritten.toStringPretty() << rsLog;
- log() << "replSet oldest at " << hn << " : " << ts.toStringPretty() << rsLog;
+ log() << "replSet error RS102 too stale to catch up, at least from primary: " << hn << rsLog;
+ log() << "replSet our last optime : " << lastOpTimeWritten.toStringLong() << rsLog;
+ log() << "replSet oldest at " << hn << " : " << ts.toStringLong() << rsLog;
log() << "replSet See http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << rsLog;
- sethbmsg("error too stale to catch up");
+ sethbmsg("error RS102 too stale to catch up");
sleepsecs(120);
return;
}
@@ -213,7 +253,13 @@ namespace mongo {
}
}
- while( 1 ) {
+ /* we have now checked if we need to rollback and we either don't have to or did it. */
+ {
+ OpTime minvalid;
+ tryToGoLiveAsASecondary(minvalid);
+ }
+
+ while( 1 ) {
while( 1 ) {
if( !r.moreInCurrentBatch() ) {
/* we need to occasionally check some things. between
@@ -222,27 +268,13 @@ namespace mongo {
/* perhaps we should check this earlier? but not before the rollback checks. */
if( state().recovering() ) {
/* can we go to RS_SECONDARY state? we can if not too old and if minvalid achieved */
- bool golive = false;
OpTime minvalid;
- {
- readlock lk("local.replset.minvalid");
- BSONObj mv;
- if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
- minvalid = mv["ts"]._opTime();
- if( minvalid <= lastOpTimeWritten ) {
- golive=true;
- }
- }
- else
- golive = true; /* must have been the original member */
- }
+ bool golive = ReplSetImpl::tryToGoLiveAsASecondary(minvalid);
if( golive ) {
- sethbmsg("");
- log() << "replSet SECONDARY" << rsLog;
- changeState(MemberState::RS_SECONDARY);
+ ;
}
else {
- sethbmsg(str::stream() << "still syncing, not yet to minValid optime " << minvalid.toString());
+ sethbmsg(str::stream() << "still syncing, not yet to minValid optime" << minvalid.toString());
}
/* todo: too stale capability */
@@ -270,12 +302,40 @@ namespace mongo {
syncApply(o);
_logOpObjRS(o); /* with repl sets we write the ops to our oplog too: */
}
+ int sd = myConfig().slaveDelay;
+ if( sd ) {
+ const OpTime ts = o["ts"]._opTime();
+ long long a = ts.getSecs();
+ long long b = time(0);
+ long long lag = b - a;
+ long long sleeptime = sd - lag;
+ if( sleeptime > 0 ) {
+ uassert(12000, "rs slaveDelay differential too big check clocks and systems", sleeptime < 0x40000000);
+ log() << "replSet temp slavedelay sleep:" << sleeptime << rsLog;
+ if( sleeptime < 60 ) {
+ sleepsecs((int) sleeptime);
+ }
+ else {
+ // sleep(hours) would prevent reconfigs from taking effect & such!
+ long long waitUntil = b + sleeptime;
+ while( 1 ) {
+ sleepsecs(6);
+ if( time(0) >= waitUntil )
+ break;
+ if( box.getPrimary() != primary )
+ break;
+ if( myConfig().slaveDelay != sd ) // reconf
+ break;
+ }
+ }
+ }
+ }
}
}
r.tailCheck();
if( !r.haveCursor() ) {
- log() << "replSet TEMP end syncTail pass with " << hn << rsLog;
- // TODO : reuse our cnonection to the primary.
+ log(1) << "replSet end syncTail pass with " << hn << rsLog;
+ // TODO : reuse our connection to the primary.
return;
}
if( box.getPrimary() != primary )
@@ -290,6 +350,10 @@ namespace mongo {
sleepsecs(1);
return;
}
+ if( sp.state.fatal() ) {
+ sleepsecs(5);
+ return;
+ }
/* later, we can sync from up secondaries if we want. tbd. */
if( sp.primary == 0 )
diff --git a/db/security_commands.cpp b/db/security_commands.cpp
index af03294..7bf2813 100644
--- a/db/security_commands.cpp
+++ b/db/security_commands.cpp
@@ -49,9 +49,7 @@ namespace mongo {
class CmdGetNonce : public Command {
public:
virtual bool requiresAuth() { return false; }
- virtual bool logTheOp() {
- return false;
- }
+ virtual bool logTheOp() { return false; }
virtual bool slaveOk() const {
return true;
}
diff --git a/db/stats/snapshots.cpp b/db/stats/snapshots.cpp
index b439b03..3ce80ca 100644
--- a/db/stats/snapshots.cpp
+++ b/db/stats/snapshots.cpp
@@ -163,7 +163,7 @@ namespace mongo {
ss << usage.count;
ss << "</td><td>";
double per = 100 * ((double)usage.time)/elapsed;
- ss << setprecision(4) << fixed << per << "%";
+ ss << setprecision(1) << fixed << per << "%";
ss << "</td>";
}
diff --git a/dbtests/basictests.cpp b/dbtests/basictests.cpp
index 342e982..27f7cdc 100644
--- a/dbtests/basictests.cpp
+++ b/dbtests/basictests.cpp
@@ -234,7 +234,7 @@ namespace BasicTests {
if ( y < 1000 || y > 2500 ){
cout << "sleeptest y: " << y << endl;
ASSERT( y >= 1000 );
- ASSERT( y <= 100000 );
+ /* ASSERT( y <= 100000 ); */
}
}
}
diff --git a/dbtests/jstests.cpp b/dbtests/jstests.cpp
index 81b9673..a9d9db8 100644
--- a/dbtests/jstests.cpp
+++ b/dbtests/jstests.cpp
@@ -541,7 +541,7 @@ namespace JSTests {
ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
out = s->getObject( "c" );
stringstream ss;
- ss << "NumberLong( \"" << val << "\" )";
+ ss << "NumberLong(\"" << val << "\")";
ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
diff --git a/debian/changelog b/debian/changelog
index a0777fd..47cf19b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,11 @@
+mongodb (1.6.3) unstable; urgency=low
+
+ * replica_sets slavedelay, rollback
+ * sharding optimization for larger than ram data sets
+ * full change log http://jira.mongodb.org/browse/SERVER/fixforversion/10190
+
+ -- Richard Kreuter <richard@10gen.com> Thu, 23 Sep 2010 16:56:28 -0500
+
mongodb (1.6.2) unstable; urgency=low
* replica_sets some fixes
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..ca6f9b7
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..c2d5362
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,37 @@
+#!/bin/sh
+# preinst script for mongodb
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/doxygenConfig b/doxygenConfig
index 889bd4f..ea7391a 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.6.2
+PROJECT_NUMBER = 1.6.3
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/datasize3.js b/jstests/datasize3.js
new file mode 100644
index 0000000..d45f34b
--- /dev/null
+++ b/jstests/datasize3.js
@@ -0,0 +1,32 @@
+
+t = db.datasize3;
+t.drop()
+
+function run( options ){
+ var c = { dataSize : "test.datasize3" };
+ if ( options )
+ Object.extend( c , options );
+ return db.runCommand( c );
+}
+
+t.insert( { x : 1 } )
+
+a = run()
+b = run( { estimate : true } )
+
+assert.eq( a.size , b.size );
+
+
+t.ensureIndex( { x : 1 } )
+
+for ( i=2; i<100; i++ )
+ t.insert( { x : i } )
+
+a = run( { min : { x : 20 } , max : { x : 50 } } )
+b = run( { min : { x : 20 } , max : { x : 50 } , estimate : true } )
+
+assert.eq( a.size , b.size );
+
+
+
+
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 8057174..6cbcbb7 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -14,9 +14,10 @@ if ( !doIt ) {
if ( doIt ) {
port = allocatePorts( 1 )[ 0 ];
m = startMongoProgram( "mongod", "--port", port, "--dbpath", "/data/db/diskfulltest", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- m.getDB( "diskfulltest" ).getCollection( "diskfulltest" ).save( { a: 6 } );
+ c = m.getDB( "diskfulltest" ).getCollection( "diskfulltest" )
+ c.save( { a: 6 } );
assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
- assert.soon( function() { return rawMongoProgramOutput().match( /Caught Assertion in insert , continuing/ ); }, "didn't see 'Caught Assertion...'" );
+ assert.isnull( c.findOne() , "shouldn't exist" );
sleep( 3000 );
m2 = new Mongo( m.host );
printjson( m2.getDBs() );
diff --git a/jstests/evalc.js b/jstests/evalc.js
new file mode 100644
index 0000000..59c9467
--- /dev/null
+++ b/jstests/evalc.js
@@ -0,0 +1,32 @@
+t = db.jstests_evalc;
+t.drop();
+
+for( i = 0; i < 10; ++i ) {
+ t.save( {i:i} );
+}
+
+// SERVER-1610
+
+function op() {
+ uri = db.runCommand( "whatsmyuri" ).you;
+ printjson( uri );
+ p = db.currentOp().inprog;
+ for ( var i in p ) {
+ var o = p[ i ];
+ if ( o.client == uri ) {
+ print( "found it" );
+ return o.opid;
+ }
+ }
+ return -1;
+}
+
+s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<500000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); " )
+
+print( "starting eval: " + Date() )
+for ( i=0; i<20000; i++ ){
+ db.eval( "db.jstests_evalc.count( {i:10} );" );
+}
+print( "end eval: " + Date() )
+
+s();
diff --git a/jstests/geo_circle1.js b/jstests/geo_circle1.js
index 9208511..4fe6c5f 100644
--- a/jstests/geo_circle1.js
+++ b/jstests/geo_circle1.js
@@ -36,7 +36,7 @@ for ( i=0; i<searches.length; i++ ){
//printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
assert.eq( correct[i].length , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
- assert.eq( correct[i].length , t.find( q ).itcount() , "count : " + tojson( searches[i] ) );
+ assert.eq( correct[i].length , t.find( q ).count() , "count : " + tojson( searches[i] ) );
assert.gt( correct[i].length * 2 , t.find(q).explain().nscanned , "nscanned : " + tojson( searches[i] ) )
}
diff --git a/jstests/geo_queryoptimizer.js b/jstests/geo_queryoptimizer.js
new file mode 100644
index 0000000..7a438bc
--- /dev/null
+++ b/jstests/geo_queryoptimizer.js
@@ -0,0 +1,27 @@
+
+t = db.geo_qo1;
+t.drop()
+
+t.ensureIndex({loc:"2d"})
+
+t.insert({'issue':0})
+t.insert({'issue':1})
+t.insert({'issue':2})
+t.insert({'issue':2, 'loc':[30.12,-118]})
+t.insert({'issue':1, 'loc':[30.12,-118]})
+t.insert({'issue':0, 'loc':[30.12,-118]})
+
+assert.eq( 6 , t.find().itcount() , "A1" )
+
+assert.eq( 2 , t.find({'issue':0}).itcount() , "A2" )
+
+assert.eq( 1 , t.find({'issue':0,'loc':{$near:[30.12,-118]}}).itcount() , "A3" )
+
+assert.eq( 2 , t.find({'issue':0}).itcount() , "B1" )
+
+assert.eq( 6 , t.find().itcount() , "B2" )
+
+assert.eq( 2 , t.find({'issue':0}).itcount() , "B3" )
+
+assert.eq( 1 , t.find({'issue':0,'loc':{$near:[30.12,-118]}}).itcount() , "B4" )
+
diff --git a/jstests/mr_sort.js b/jstests/mr_sort.js
new file mode 100644
index 0000000..7692062
--- /dev/null
+++ b/jstests/mr_sort.js
@@ -0,0 +1,44 @@
+
+t = db.mr_sort;
+t.drop()
+
+t.ensureIndex( { x : 1 } )
+
+t.insert( { x : 1 } )
+t.insert( { x : 10 } )
+t.insert( { x : 2 } )
+t.insert( { x : 9 } )
+t.insert( { x : 3 } )
+t.insert( { x : 8 } )
+t.insert( { x : 4 } )
+t.insert( { x : 7 } )
+t.insert( { x : 5 } )
+t.insert( { x : 6 } )
+
+m = function(){
+ emit( "a" , this.x )
+}
+
+r = function( k , v ){
+ return Array.sum( v )
+}
+
+
+res = t.mapReduce( m , r );
+x = res.convertToSingleObject();
+res.drop();
+assert.eq( { "a" : 55 } , x , "A1" )
+
+res = t.mapReduce( m , r , { query : { x : { $lt : 3 } } } )
+x = res.convertToSingleObject();
+res.drop();
+assert.eq( { "a" : 3 } , x , "A2" )
+
+res = t.mapReduce( m , r , { sort : { x : 1 } , limit : 2 } );
+x = res.convertToSingleObject();
+res.drop();
+assert.eq( { "a" : 3 } , x , "A3" )
+
+
+
+
diff --git a/jstests/numberlong.js b/jstests/numberlong.js
index 848ef87..1cbbc7a 100644
--- a/jstests/numberlong.js
+++ b/jstests/numberlong.js
@@ -4,50 +4,50 @@ n = new NumberLong( 4 );
assert.eq.automsg( "4", "n" );
assert.eq.automsg( "4", "n.toNumber()" );
assert.eq.automsg( "8", "n + 4" );
-assert.eq.automsg( "'NumberLong( 4 )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( 4 )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(4)'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(4)'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( 4 ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(4) }'", "p" );
-assert.eq.automsg( "NumberLong( 4 )", "eval( tojson( NumberLong( 4 ) ) )" );
+assert.eq.automsg( "NumberLong(4 )", "eval( tojson( NumberLong( 4 ) ) )" );
assert.eq.automsg( "a", "eval( tojson( a ) )" );
n = new NumberLong( -4 );
assert.eq.automsg( "-4", "n" );
assert.eq.automsg( "-4", "n.toNumber()" );
assert.eq.automsg( "0", "n + 4" );
-assert.eq.automsg( "'NumberLong( -4 )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( -4 )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(-4)'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(-4)'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( -4 ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(-4) }'", "p" );
// too big to fit in double
n = new NumberLong( "11111111111111111" );
assert.eq.automsg( "11111111111111112", "n.toNumber()" );
assert.eq.automsg( "11111111111111116", "n + 4" );
-assert.eq.automsg( "'NumberLong( \"11111111111111111\" )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( \"11111111111111111\" )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(\"11111111111111111\")'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(\"11111111111111111\")'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( \"11111111111111111\" ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(\"11111111111111111\") }'", "p" );
-assert.eq.automsg( "NumberLong( '11111111111111111' )", "eval( tojson( NumberLong( '11111111111111111' ) ) )" );
+assert.eq.automsg( "NumberLong('11111111111111111' )", "eval( tojson( NumberLong( '11111111111111111' ) ) )" );
assert.eq.automsg( "a", "eval( tojson( a ) )" );
n = new NumberLong( "-11111111111111111" );
assert.eq.automsg( "-11111111111111112", "n.toNumber()" );
assert.eq.automsg( "-11111111111111108", "n + 4" );
-assert.eq.automsg( "'NumberLong( \"-11111111111111111\" )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( \"-11111111111111111\" )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(\"-11111111111111111\")'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(\"-11111111111111111\")'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( \"-11111111111111111\" ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(\"-11111111111111111\") }'", "p" );
// parsing
assert.throws.automsg( function() { new NumberLong( "" ); } );
diff --git a/jstests/remove3.js b/jstests/remove3.js
index fe1a754..2a51a6e 100644
--- a/jstests/remove3.js
+++ b/jstests/remove3.js
@@ -14,5 +14,5 @@ assert.eq( 4 , t.count() , "B" );
t.remove( { _id : 5 } );
assert.eq( 3 , t.count() , "C" );
-t.remove( { _id : { $lt : 8 } } , "D" );
+t.remove( { _id : { $lt : 8 } } );
assert.eq( 1 , t.count() , "D" );
diff --git a/jstests/remove_justone.js b/jstests/remove_justone.js
new file mode 100644
index 0000000..e412a13
--- /dev/null
+++ b/jstests/remove_justone.js
@@ -0,0 +1,16 @@
+
+t = db.remove_justone
+t.drop()
+
+t.insert( { x : 1 } )
+t.insert( { x : 1 } )
+t.insert( { x : 1 } )
+t.insert( { x : 1 } )
+
+assert.eq( 4 , t.count() )
+
+t.remove( { x : 1 } , true )
+assert.eq( 3 , t.count() )
+
+t.remove( { x : 1 } )
+assert.eq( 0 , t.count() )
diff --git a/jstests/repl/repl10.js b/jstests/repl/repl10.js
index 67c5db1..cc7cf12 100644
--- a/jstests/repl/repl10.js
+++ b/jstests/repl/repl10.js
@@ -26,13 +26,15 @@ doTest = function( signal ) {
am.save( {i:2} );
assert.eq( 2, am.count() );
sleep( 3000 );
-
- rt.stop( true, signal );
- sleep( 3000 );
assert.eq( 1, s.getDB( baseName ).a.count() );
+ soonCount( 2 );
+
rt.stop();
}
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
+print("repl10.js dotest(15)");
+doTest(15); // SIGTERM
+print("repl10.js dotest(15)");
+doTest(9); // SIGKILL
+print("repl10.js SUCCESS");
diff --git a/jstests/repl/repl12.js b/jstests/repl/repl12.js
new file mode 100644
index 0000000..586aa53
--- /dev/null
+++ b/jstests/repl/repl12.js
@@ -0,0 +1,47 @@
+// SERVER-1626
+// check for initial sync of multiple db's
+
+function debug( x ) {
+ print( "DEBUG:" + tojson( x ) );
+}
+
+rt = new ReplTest( "repl12tests" );
+
+m = rt.start( true );
+
+usedDBs = []
+
+a = "a"
+for( i = 0; i < 3; ++i ) {
+ usedDBs.push( a )
+ m.getDB( a ).c.save( {} );
+ a += "a";
+}
+m.getDB(a).getLastError();
+
+//print("\n\n\n DB NAMES MASTER:");
+//printjson(m.getDBNames());
+
+var z = 10500;
+print("sleeping " + z + "ms");
+sleep(z);
+
+s = rt.start(false);
+
+function countHave(){
+ var have = 0;
+ for ( var i=0; i<usedDBs.length; i++ ){
+ if ( s.getDB( usedDBs[i] ).c.findOne() )
+ have++;
+ }
+ return have;
+}
+
+assert.soon(
+ function() {
+ var c = countHave();
+ debug( "count: " + c );
+ return c == 3; }
+);
+
+//printjson(s.getDBNames());
diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js
index c2983dc..33b054a 100644
--- a/jstests/repl/replacePeer2.js
+++ b/jstests/repl/replacePeer2.js
@@ -44,15 +44,8 @@ doTest = function( signal ) {
checkWrite( rp.master(), rp.slave() );
// allow slave to finish initial sync
- assert.soon(
- function() {
- var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
- if ( res.ok == 1 )
- return true;
- printjson( res );
- return false;
- }
- );
+ var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
+ assert( res.ok , "replacepeer didn't finish: " + tojson( res ) );
// Should not be saved to slave.
writeOne( rp.master() );
diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js
index d65cad7..60b3531 100644
--- a/jstests/repl/snapshot2.js
+++ b/jstests/repl/snapshot2.js
@@ -1,4 +1,6 @@
-// Test SERVER-623 - starting repl peer from a new snapshot of master
+// Test SERVER-623 - starting repl peer from a new snapshot of master
+
+print("snapshot2.js 1 -----------------------------------------------------------");
ports = allocatePorts( 3 );
@@ -7,21 +9,37 @@ var basePath = "/data/db/" + baseName;
a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
-r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+
+print("snapshot2.js 2 -----------------------------------------------------------");
-rp = new ReplPair( l, r, a );
-rp.start();
-rp.waitForSteadyState();
+rp = new ReplPair(l, r, a);
+rp.start();
+print("snapshot2.js 3 -----------------------------------------------------------");
+rp.waitForSteadyState();
+
+print("snapshot2.js 4 -----------------------------------------------------------");
big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously
-rp.slave().setSlaveOk();
-for( i = 0; i < 500; ++i ) {
- rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
- if ( i % 250 == 249 ) {
- assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+rp.slave().setSlaveOk();
+print("snapshot2.js 5 -----------------------------------------------------------");
+for (i = 0; i < 500; ++i) {
+ rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
+ if (i % 250 == 249) {
+ function p() { return i + 1 == rp.slave().getDB(baseName)[baseName].count(); }
+ try {
+ assert.soon(p);
+ } catch (e) {
+ print("\n\n\nsnapshot2.js\ni+1:" + (i + 1));
+ print("slave count:" + rp.slave().getDB(baseName)[baseName].count());
+ sleep(2000);
+ print(p());
+ throw (e);
+ }
sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
}
-}
+}
+print("snapshot2.js 6 -----------------------------------------------------------");
rp.master().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
leftMaster = ( rp.master().host == rp.left().host );
@@ -47,5 +65,8 @@ assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
rp.master().getDB( baseName )[ baseName ].save( {i:500} );
assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) ); \ No newline at end of file
+assert( !rawMongoProgramOutput().match( /resync/ ) );
+assert(!rawMongoProgramOutput().match(/SyncException/));
+
+print("snapshot2.js SUCCESS ----------------");
+
diff --git a/jstests/replsets/randomcommands1.js b/jstests/replsets/randomcommands1.js
new file mode 100644
index 0000000..c451e74
--- /dev/null
+++ b/jstests/replsets/randomcommands1.js
@@ -0,0 +1,29 @@
+
+replTest = new ReplSetTest( {name: 'randomcommands1', nodes: 3} );
+
+nodes = replTest.startSet();
+replTest.initiate();
+
+master = replTest.getMaster();
+slaves = replTest.liveNodes.slaves;
+printjson(replTest.liveNodes);
+
+db = master.getDB("foo")
+t = db.foo
+
+ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } )
+
+t.save({a: 1000});
+t.ensureIndex( { a : 1 } )
+
+db.getLastError( 3 , 30000 )
+
+ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
+t.reIndex()
+
+db.getLastError( 3 , 30000 )
+ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
+replTest.stopSet( 15 )
+
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 6a18dff..5ac94e7 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -112,4 +112,5 @@ doTest = function( signal ) {
replTest.stopSet( signal );
}
-doTest( 15 );
+doTest( 15 );
+print("replset1.js SUCCESS");
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index eaa35ee..f18b467 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -1,111 +1,141 @@
-
-doTest = function( signal ) {
-
- // FAILING TEST
- // See below:
-
- // Test replication with getLastError
-
- // Replica set testing API
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
-
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- var nodes = replTest.startSet();
-
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
-
- // Call getMaster to return a reference to the node that's been
- // elected master.
- var master = replTest.getMaster();
-
- // Wait for replication to a single node
- master.getDB("test").bar.insert({n: 1});
-
- // Wait for initial sync
- replTest.awaitReplication();
-
- var slaves = replTest.liveNodes.slaves;
- slaves.forEach(function(slave) { slave.setSlaveOk(); });
-
- var testDB = "repl-test";
-
- var failed = false;
- var callGetLastError = function(w, timeout, db) {
- var result = master.getDB(db).getLastErrorObj( w , timeout );
- printjson( result );
- if(result['ok'] != 1) {
- print("FAILURE");
- failed = true;
- }
- }
-
- // Test getlasterror with multiple inserts
- // TEST FAILS HERE
- print("**** Try inserting a multiple records -- first insert ****")
- master.getDB(testDB).foo.insert({n: 1});
- master.getDB(testDB).foo.insert({n: 2});
- master.getDB(testDB).foo.insert({n: 3});
- callGetLastError(3, 10000, testDB);
-
- print("**** TEMP 1a ****")
-
- m1 = master.getDB(testDB).foo.findOne({n: 1});
- printjson( m1 );
- assert( m1['n'] == 1 , "Failed to save to master on multiple inserts");
-
- print("**** TEMP 1b ****")
-
- var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
- assert( s0['n'] == 1 , "Failed to replicate to slave 0 on multiple inserts");
-
- var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
- assert( s1['n'] == 1 , "Failed to replicate to slave 1 on multiple inserts");
-
-
- // Test getlasterror with a simple insert
- print("**** Try inserting a single record ****")
- master.getDB(testDB).dropDatabase();
- master.getDB(testDB).foo.insert({n: 1});
- callGetLastError(3, 10000, testDB);
-
- m1 = master.getDB(testDB).foo.findOne({n: 1});
- printjson( m1 );
- assert( m1['n'] == 1 , "Failed to save to master");
-
-
- var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
- assert( s0['n'] == 1 , "Failed to replicate to slave 0");
-
- var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
- assert( s1['n'] == 1 , "Failed to replicate to slave 1");
-
-
- // Test getlasterror with large insert
- print("**** Try inserting many records ****")
- bigData = new Array(2000).toString()
- for(var n=0; n<1000; n++) {
- master.getDB(testDB).baz.insert({n: n, data: bigData});
+print("\n\nreplset2.js BEGIN");
+
+doTest = function (signal) {
+
+ // FAILING TEST
+ // See below:
+
+ // Test replication with getLastError
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3, oplogSize: 5 });
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ var testDB = "repl-test";
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Wait for replication to a single node
+ master.getDB(testDB).bar.insert({ n: 1 });
+
+ // Wait for initial sync
+ replTest.awaitReplication();
+
+ var slaves = replTest.liveNodes.slaves;
+ slaves.forEach(function (slave) { slave.setSlaveOk(); });
+
+ var failed = false;
+ var callGetLastError = function (w, timeout, db) {
+ try {
+ var result = master.getDB(db).getLastErrorObj(w, timeout);
+ print("replset2.js getLastError result: " + tojson(result));
+ if (result['ok'] != 1) {
+ print("replset2.js FAILURE getlasterror not ok");
+ failed = true;
+ }
+ }
+ catch (e) {
+ print("\nreplset2.js exception in getLastError: " + e + '\n');
+ throw e;
+ }
+ }
+
+ // Test getlasterror with multiple inserts
+ // TEST FAILS HEREg
+ print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****")
+
+ printjson(master.getDB("admin").runCommand("replSetGetStatus"));
+
+ master.getDB(testDB).foo.insert({ n: 1 });
+ master.getDB(testDB).foo.insert({ n: 2 });
+ master.getDB(testDB).foo.insert({ n: 3 });
+
+ print("\nreplset2.js **** TEMP 1 ****")
+
+ printjson(master.getDB("admin").runCommand("replSetGetStatus"));
+
+ callGetLastError(3, 25000, testDB);
+
+ print("replset2.js **** TEMP 1a ****")
+
+ m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ printjson(m1);
+ assert(m1['n'] == 1, "replset2.js Failed to save to master on multiple inserts");
+
+ print("replset2.js **** TEMP 1b ****")
+
+ var s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0 on multiple inserts");
+
+ var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts");
+
+ // Test getlasterror with a simple insert
+ print("replset2.js **** Try inserting a single record ****")
+ master.getDB(testDB).dropDatabase();
+ master.getDB(testDB).foo.insert({ n: 1 });
+ callGetLastError(3, 10000, testDB);
+
+ m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ printjson(m1);
+ assert(m1['n'] == 1, "replset2.js Failed to save to master");
+
+ s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0");
+
+ s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1");
+
+ // Test getlasterror with large insert
+ print("replset2.js **** Try inserting many records ****")
+ try {
+ bigData = new Array(2000).toString()
+ for (var n = 0; n < 1000; n++) {
+ master.getDB(testDB).baz.insert({ n: n, data: bigData });
+ }
+ callGetLastError(3, 60000, testDB);
+
+ print("replset2.js **** V1 ")
+
+ var verifyReplication = function (nodeName, collection) {
+ data = collection.findOne({ n: 1 });
+ assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName);
+ data = collection.findOne({ n: 999 });
+ assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName);
+ }
+
+ print("replset2.js **** V2 ")
+
+ verifyReplication("master", master.getDB(testDB).baz);
+ verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
+ verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
+
+ assert(failed == false, "replset2.js Replication with getLastError failed. See errors.");
}
- callGetLastError(3, 60000, testDB);
-
- var verifyReplication = function(nodeName, collection) {
- data = collection.findOne({n: 1});
- assert( data['n'] == 1 , "Failed to save to " + nodeName);
- data = collection.findOne({n: 999});
- assert( data['n'] == 999 , "Failed to save to " + nodeName);
+ catch(e) {
+ print("ERROR: " + e);
+ print("Master oplog findOne:");
+ printjson(master.getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ print("Slave 0 oplog findOne:");
+ printjson(slaves[0].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ print("Slave 1 oplog findOne:");
+ printjson(slaves[1].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
}
- verifyReplication("master", master.getDB(testDB).baz);
- verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
- verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
-
- assert( failed == false, "Replication with getLastError failed. See errors." );
-
- replTest.stopSet( signal );
+
+ replTest.stopSet(signal);
}
-doTest( 15 );
+doTest( 15 );
+
+print("\nreplset2.js SUCCESS\n");
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
index 4f6c454..9b1f2e9 100644
--- a/jstests/replsets/replset4.js
+++ b/jstests/replsets/replset4.js
@@ -1,29 +1,44 @@
-doTest = function( signal ) {
-
- // Test orphaned master steps down
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
-
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getMaster();
-
- // Kill both slaves, simulating a network partition
- var slaves = replTest.liveNodes.slaves;
- for(var i=0; i<slaves.length; i++) {
- var slave_id = replTest.getNodeId(slaves[i]);
- replTest.stop( slave_id );
- }
-
- var result = master.getDB("admin").runCommand({ismaster: 1});
- printjson( result );
- assert.soon(function() {
- var result = master.getDB("admin").runCommand({ismaster: 1});
- printjson( result );
- return (result['ok'] == 1 && result['ismaster'] == false);
- }, "Master fails to step down when orphaned.");
-
- replTest.stopSet( signal );
-}
-
+doTest = function (signal) {
+
+ // Test orphaned master steps down
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+
+ replTest.startSet();
+ replTest.initiate();
+
+ var master = replTest.getMaster();
+
+ // Kill both slaves, simulating a network partition
+ var slaves = replTest.liveNodes.slaves;
+ for (var i = 0; i < slaves.length; i++) {
+ var slave_id = replTest.getNodeId(slaves[i]);
+ replTest.stop(slave_id);
+ }
+
+ print("replset4.js 1");
+
+ var result = master.getDB("admin").runCommand({ ismaster: 1 });
+
+ print("replset4.js 2");
+ printjson(result);
+
+ assert.soon(
+ function () {
+ try {
+ var result = master.getDB("admin").runCommand({ ismaster: 1 });
+ return (result['ok'] == 1 && result['ismaster'] == false);
+ } catch (e) {
+ print("replset4.js caught " + e);
+ return false;
+ }
+ },
+ "Master fails to step down when orphaned."
+ );
+
+ print("replset4.js worked, stopping");
+ replTest.stopSet(signal);
+}
+
+print("replset4.js");
doTest( 15 );
+print("replset4.js SUCCESS");
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
new file mode 100644
index 0000000..fe1761e
--- /dev/null
+++ b/jstests/replsets/replset5.js
@@ -0,0 +1,72 @@
+// rs test getlasterrordefaults
+
+doTest = function (signal) {
+
+ // Test getLastError defaults
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+
+ var nodes = replTest.startSet();
+
+ // Initiate set with default for getLastError
+ var config = replTest.getReplSetConfig();
+ config.settings = {};
+ config.settings.getLastErrorDefaults = { 'w': 3, 'wtimeout': 20000 };
+
+ replTest.initiate(config);
+
+ //
+ var master = replTest.getMaster();
+ replTest.awaitSecondaryNodes();
+ var testDB = "foo";
+
+ // Initial replication
+ master.getDB("barDB").bar.save({ a: 1 });
+ replTest.awaitReplication();
+
+ // These writes should be replicated immediately
+ master.getDB(testDB).foo.insert({ n: 1 });
+ master.getDB(testDB).foo.insert({ n: 2 });
+ master.getDB(testDB).foo.insert({ n: 3 });
+
+ // *** NOTE ***: The default doesn't seem to be propogating.
+ // When I run getlasterror with no defaults, the slaves don't have the data:
+ // These getlasterror commands can be run individually to verify this.
+ //master.getDB("admin").runCommand({ getlasterror: 1, w: 3, wtimeout: 20000 });
+ master.getDB("admin").runCommand({getlasterror: 1});
+
+ var slaves = replTest.liveNodes.slaves;
+ slaves[0].setSlaveOk();
+ slaves[1].setSlaveOk();
+
+ print("Testing slave counts");
+
+ // These should all have 3 documents, but they don't always.
+ var master1count = master.getDB(testDB).foo.count();
+ assert( master1count == 3, "Master has " + master1count + " of 3 documents!");
+
+ var slave0count = slaves[0].getDB(testDB).foo.count();
+ assert( slave0count == 3, "Slave 0 has " + slave0count + " of 3 documents!");
+
+ var slave1count = slaves[1].getDB(testDB).foo.count();
+ assert( slave1count == 3, "Slave 1 has " + slave1count + " of 3 documents!");
+
+ print("Testing slave 0");
+
+ var s0 = slaves[0].getDB(testDB).foo.find();
+ assert(s0.next()['n']);
+ assert(s0.next()['n']);
+ assert(s0.next()['n']);
+
+ print("Testing slave 1");
+
+ var s1 = slaves[1].getDB(testDB).foo.find();
+ assert(s1.next()['n']);
+ assert(s1.next()['n']);
+ assert(s1.next()['n']);
+
+ // End test
+ replTest.stopSet(signal);
+}
+
+doTest( 15 );
+print("replset5.js success");
diff --git a/jstests/replsets/replset_remove_node.js b/jstests/replsets/replset_remove_node.js
index e06a951..fcb754c 100644
--- a/jstests/replsets/replset_remove_node.js
+++ b/jstests/replsets/replset_remove_node.js
@@ -27,8 +27,13 @@ doTest = function( signal ) {
// Remove that node from the configuration
replTest.remove( slaveId );
- // Then, reinitiate
- replTest.reInitiate();
+ // Now, re-initiate
+ var c = master.getDB("local")['system.replset'].findOne();
+ var config = replTest.getReplSetConfig();
+ config.version = c.version + 1;
+ config.members = [ { "_id" : 0, "host" : replTest.host + ":31000" },
+ { "_id" : 2, "host" : replTest.host + ":31002" } ]
+ replTest.initiate( config , 'replSetReconfig' );
// Make sure that a new master comes up
master = replTest.getMaster();
@@ -52,6 +57,8 @@ doTest = function( signal ) {
stat = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1});
return stat.members.length == 2;
}, "Wrong number of members", 60000);
-}
-
-doTest( 15 );
+}
+
+print("replset_remove_node.js");
+doTest(15);
+print("replset_remove_node SUCCESS");
diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js
index f072d61..8840371 100644
--- a/jstests/replsets/rollback.js
+++ b/jstests/replsets/rollback.js
@@ -28,7 +28,7 @@ function wait(f) {
var n = 0;
while (!f()) {
if( n % 4 == 0 )
- print("waiting " + w);
+ print("rollback.js waiting " + w);
if (++n == 4) {
print("" + f);
}
@@ -72,6 +72,31 @@ doTest = function (signal) {
// Wait for initial replication
var a = a_conn.getDB("foo");
var b = b_conn.getDB("foo");
+
+ /* force the oplog to roll */
+ if (new Date() % 2 == 0) {
+ print("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)");
+ var pass = 1;
+ var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ a.roll.insert({ x: 1 });
+ while (1) {
+ for (var i = 0; i < 10000; i++)
+ a.roll.update({}, { $inc: { x: 1} });
+ var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ if (tojson(op.h) != tojson(first.h)) {
+ printjson(op);
+ printjson(first);
+ break;
+ }
+ pass++;
+ a.getLastError(2); // unlikely secondary isn't keeping up, but let's avoid possible intermittent issues with that.
+ }
+ print("PASSES FOR OPLOG ROLL: " + pass);
+ }
+ else {
+ print("NO ROLL");
+ }
+
a.bar.insert({ q: 1, a: "foo" });
a.bar.insert({ q: 2, a: "foo", x: 1 });
a.bar.insert({ q: 3, bb: 9, a: "foo" });
@@ -122,8 +147,9 @@ doTest = function (signal) {
friendlyEqual(a.bar.find().sort({ _id: 1 }).toArray(), b.bar.find().sort({ _id: 1 }).toArray(), "server data sets do not match");
- pause("SUCCESS");
+ pause("rollback.js SUCCESS");
replTest.stopSet(signal);
}
+print("rollback.js");
doTest( 15 );
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index f9c48ff..483d221 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -27,8 +27,8 @@ function wait(f) {
w++;
var n = 0;
while (!f()) {
- if( n % 4 == 0 )
- print("waiting " + w);
+ if (n % 4 == 0)
+ print("rollback2.js waiting " + w);
if (++n == 4) {
print("" + f);
}
@@ -192,8 +192,10 @@ doTest = function (signal) {
assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
- pause("SUCCESS");
+ pause("rollback2.js SUCCESS");
replTest.stopSet(signal);
}
+print("rollback2.js");
+
doTest( 15 );
diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js
new file mode 100755
index 0000000..5c2f2f1
--- /dev/null
+++ b/jstests/replsets/rollback3.js
@@ -0,0 +1,224 @@
+// test rollback in replica sets
+
+// try running as :
+//
+// mongo --nodb rollback.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if (n % 4 == 0)
+ print("rollback3.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ if (n == 200) {
+ print("rollback3.js failing waited too long");
+ throw "wait error";
+ }
+ sleep(1000);
+ }
+}
+
+function dbs_match(a, b) {
+ print("dbs_match");
+
+ var ac = a.system.namespaces.find().sort({name:1}).toArray();
+ var bc = b.system.namespaces.find().sort({name:1}).toArray();
+ if (!friendlyEqual(ac, bc)) {
+ print("dbs_match: namespaces don't match");
+ print("\n\n");
+ printjson(ac);
+ print("\n\n");
+ printjson(bc);
+ print("\n\n");
+ return false;
+ }
+
+ var c = a.getCollectionNames();
+ for( var i in c ) {
+ print("checking " + c[i]);
+ // system.indexes doesn't have _id so the more involved sort here:
+ if (!friendlyEqual(a[c[i]].find().sort({ _id: 1, ns:1, name:1 }).toArray(), b[c[i]].find().sort({ _id: 1, ns:1,name:1 }).toArray())) {
+ print("dbs_match: collections don't match " + c[i]);
+ if (a[c[i]].count() < 12) {
+ printjson(a[c[i]].find().sort({ _id: 1 }).toArray());
+ printjson(b[c[i]].find().sort({ _id: 1 }).toArray());
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+/* these writes will be initial data and replicate everywhere. */
+function doInitialWrites(db) {
+ db.b.insert({ x: 1 });
+ db.b.ensureIndex({ x: 1 });
+ db.oldname.insert({ y: 1 });
+ db.oldname.insert({ y: 2 });
+ db.oldname.ensureIndex({ y: 1 },true);
+ t = db.bar;
+ t.insert({ q:0});
+ t.insert({ q: 1, a: "foo" });
+ t.insert({ q: 2, a: "foo", x: 1 });
+ t.insert({ q: 3, bb: 9, a: "foo" });
+ t.insert({ q: 40333333, a: 1 });
+ for (var i = 0; i < 200; i++) t.insert({ i: i });
+ t.insert({ q: 40, a: 2 });
+ t.insert({ q: 70, txt: 'willremove' });
+
+ db.createCollection("kap", { capped: true, size: 5000 });
+ db.kap.insert({ foo: 1 })
+}
+
+/* these writes on one primary only and will be rolled back. */
+function doItemsToRollBack(db) {
+ t = db.bar;
+ t.insert({ q: 4 });
+ t.update({ q: 3 }, { q: 3, rb: true });
+
+ t.remove({ q: 40 }); // multi remove test
+
+ t.update({ q: 2 }, { q: 39, rb: true });
+
+ // rolling back a delete will involve reinserting the item(s)
+ t.remove({ q: 1 });
+
+ t.update({ q: 0 }, { $inc: { y: 1} });
+
+ db.kap.insert({ foo: 2 })
+ db.kap2.insert({ foo: 2 })
+
+ // create a collection (need to roll back the whole thing)
+ db.newcoll.insert({ a: true });
+
+ // create a new empty collection (need to roll back the whole thing)
+ db.createCollection("abc");
+
+ // drop a collection - we'll need all its data back!
+ t.drop();
+
+ // drop an index - verify it comes back
+ db.b.dropIndexes();
+
+ // two to see if we transitively rollback?
+ db.oldname.renameCollection("newname");
+ db.newname.renameCollection("fooname");
+
+ assert(db.fooname.count() > 0, "count rename");
+
+ // test roll back (drop) a whole database
+ abc = db.getSisterDB("abc");
+ abc.foo.insert({ x: 1 });
+ abc.bar.insert({ y: 999 });
+
+ // test making and dropping a database
+ //mkd = db.getSisterDB("mkd");
+ //mkd.c.insert({ y: 99 });
+ //mkd.dropDatabase();
+}
+
+function doWritesToKeep2(db) {
+ t = db.bar;
+ t.insert({ txt: 'foo' });
+ t.remove({ q: 70 });
+ t.update({ q: 0 }, { $inc: { y: 33} });
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
+ var nodes = replTest.nodeList();
+ //print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({ "_id": "unicomplex",
+ "members": [
+ { "_id": 0, "host": nodes[0] },
+ { "_id": 1, "host": nodes[1] },
+ { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ });
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+ a_conn = conns[0];
+ A = a_conn.getDB("admin");
+ b_conn = conns[1];
+ a_conn.setSlaveOk();
+ b_conn.setSlaveOk();
+ B = b_conn.getDB("admin");
+ assert(master == conns[0], "conns[0] assumed to be master");
+ assert(a_conn == master);
+
+ //deb(master);
+
+ // Make sure we have an arbiter
+ assert.soon(function () {
+ res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
+
+ // Wait for initial replication
+ var a = a_conn.getDB("foo");
+ var b = b_conn.getDB("foo");
+ doInitialWrites(a);
+
+ // wait for secondary to get this data
+ wait(function () { return b.bar.count() == a.bar.count(); });
+
+ A.runCommand({ replSetTest: 1, blind: true });
+ wait(function () { return B.isMaster().ismaster; });
+
+ doItemsToRollBack(b);
+
+ // a should not have the new data as it was in blind state.
+ B.runCommand({ replSetTest: 1, blind: true });
+ A.runCommand({ replSetTest: 1, blind: false });
+ wait(function () { return !B.isMaster().ismaster; });
+ wait(function () { return A.isMaster().ismaster; });
+
+ assert(a.bar.count() >= 1, "count check");
+ doWritesToKeep2(a);
+
+ // A is 1 2 3 7 8
+ // B is 1 2 3 4 5 6
+
+ // bring B back online
+ // as A is primary, B will roll back and then catch up
+ B.runCommand({ replSetTest: 1, blind: false });
+
+ wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
+
+ // everyone is up here...
+ assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
+ assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
+
+ assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
+
+ pause("rollback3.js SUCCESS");
+ replTest.stopSet(signal);
+}
+
+print("rollback3.js");
+doTest( 15 );
diff --git a/jstests/replsets/sync1.js b/jstests/replsets/sync1.js
index 0f7754e..e60d128 100644
--- a/jstests/replsets/sync1.js
+++ b/jstests/replsets/sync1.js
@@ -2,8 +2,10 @@
var debugging=0;
+w = 0;
+
function pause(s) {
- // for debugging just to keep processes running
+ // for debugging just to keep processes running
print("\nsync1.js: " + s);
if (debugging) {
while (1) {
@@ -11,180 +13,197 @@ function pause(s) {
sleep(4000);
}
}
-}
-
-doTest = function (signal) {
-
+}
+
+doTest = function (signal) {
+
var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
var nodes = replTest.startSet({ oplogSize: "40" });
-
- sleep(5000);
-
- print("\nsync1.js ********************************************************************** part 0");
- replTest.initiate();
-
+
+ sleep(5000);
+
+ print("\nsync1.js ********************************************************************** part 0");
+ replTest.initiate();
+
// get master
- print("\nsync1.js ********************************************************************** part 1");
- var master = replTest.getMaster();
- print("\nsync1.js ********************************************************************** part 2");
- var dbs = [master.getDB("foo")];
-
- for (var i in nodes) {
- if (nodes[i] + "" == master + "") {
- continue;
- }
- dbs.push(nodes[i].getDB("foo"));
- nodes[i].setSlaveOk();
- }
-
- print("\nsync1.js ********************************************************************** part 3");
- dbs[0].bar.drop();
-
- print("\nsync1.js ********************************************************************** part 4");
+ print("\nsync1.js ********************************************************************** part 1");
+ var master = replTest.getMaster();
+ print("\nsync1.js ********************************************************************** part 2");
+ var dbs = [master.getDB("foo")];
+
+ for (var i in nodes) {
+ if (nodes[i] + "" == master + "") {
+ continue;
+ }
+ dbs.push(nodes[i].getDB("foo"));
+ nodes[i].setSlaveOk();
+ }
+
+ print("\nsync1.js ********************************************************************** part 3");
+ dbs[0].bar.drop();
+
+ print("\nsync1.js ********************************************************************** part 4");
// slow things down a bit
- dbs[0].bar.ensureIndex({ x: 1 });
- dbs[0].bar.ensureIndex({ y: 1 });
- dbs[0].bar.ensureIndex({ z: 1 });
- dbs[0].bar.ensureIndex({ w: 1 });
-
- var ok = false;
- var inserts = 100000;
-
- print("\nsync1.js ********************************************************************** part 5");
-
- for (var i = 0; i < inserts; i++) {
- dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
- }
-
- var status;
- do {
- sleep(1000);
- status = dbs[0].getSisterDB("admin").runCommand({replSetGetStatus : 1});
- } while(status.members[1].state != 2 && status.members[2].state != 2);
-
- print("\nsync1.js ********************************************************************** part 6");
- dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
-
- print("\nsync1.js ********************************************************************** part 7");
-
- sleep(5000);
-
- // yay! there are out-of-date nodes
- var max1;
- var max2;
- var count = 0;
- while( 1 ) {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch(e) {
- print("\nsync1.js couldn't get max1/max2; retrying " + e);
- sleep(2000);
- count++;
- if (count == 50) {
- assert(false, "errored out 50 times");
- }
- continue;
- }
- break;
- }
-
- print("\nsync1.js ********************************************************************** part 8");
-
- if (max1.z == (inserts-1) && max2.z == (inserts-1)) {
- print("\nsync1.js try increasing # if inserts and running again");
- replTest.stopSet(signal);
- return;
- }
-
- // wait for a new master to be elected
- sleep(5000);
-
- // figure out who is master now
- var newMaster = replTest.getMaster();
-
- print("\nsync1.js ********************************************************************** part 9");
-
- print("\nsync1.js \nsync1.js ********************************************************************** part 9 **********************************************");
+ dbs[0].bar.ensureIndex({ x: 1 });
+ dbs[0].bar.ensureIndex({ y: 1 });
+ dbs[0].bar.ensureIndex({ z: 1 });
+ dbs[0].bar.ensureIndex({ w: 1 });
+
+ var ok = false;
+ var inserts = 100000;
+
+ print("\nsync1.js ********************************************************************** part 5");
+
+ for (var i = 0; i < inserts; i++) {
+ dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
+ }
+
+ var status;
+ do {
+ sleep(1000);
+ status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
+ } while (status.members[1].state != 2 && status.members[2].state != 2);
+
+ print("\nsync1.js ********************************************************************** part 6");
+ dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
+
+ print("\nsync1.js ********************************************************************** part 7");
+
+ sleep(5000);
+
+ var max1;
+ var max2;
+ var count = 0;
+ while (1) {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js couldn't get max1/max2; retrying " + e);
+ sleep(2000);
+ count++;
+ if (count == 50) {
+ assert(false, "errored out 50 times");
+ }
+ continue;
+ }
+ break;
+ }
+
+ // wait for a new master to be elected
+ sleep(5000);
+ var newMaster;
+
+ print("\nsync1.js ********************************************************************** part 9");
+
+ for (var q = 0; q < 10; q++) {
+ // figure out who is master now
+ newMaster = replTest.getMaster();
+ if (newMaster + "" != master + "")
+ break;
+ sleep(2000);
+ if (q > 6) print("sync1.js zzz....");
+ }
+
assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
+
print("\nsync1.js new master is " + newMaster + ", old master was " + master);
-
- count = 0;
- do {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch( e ) {
- print("\nsync1.js: exception querying; will sleep and try again " + e);
- sleep(2000);
- continue;
- }
-
- print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
-
+
+ print("\nsync1.js ********************************************************************** part 9.1");
+
+ count = 0;
+ countExceptions = 0;
+ do {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ if (countExceptions++ > 300) {
+ print("dbs[1]:");
+ try {
+ printjson(dbs[1].isMaster());
+ printjson(dbs[1].bar.count());
+ }
+ catch (e) { print(e); }
+ print("dbs[2]:");
+ try {
+ printjson(dbs[2].isMaster());
+ printjson(dbs[2].bar.count());
+ }
+ catch (e) { print(e); }
+ assert(false, "sync1.js too many exceptions, failing");
+ }
+ print("\nsync1.js: exception querying; will sleep and try again " + e);
+ sleep(3000);
+ continue;
+ }
+
+ print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
+
// printjson(max1);
// printjson(max2);
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- pause("fail phase 1");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- } while (max1.z != max2.z);
-
- // okay, now they're caught up. We have a max:
- var max = max1.z;
-
- print("\nsync1.js ********************************************************************** part 10");
-
- // now, let's see if rollback works
- var result = dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: false });
- dbs[0].getMongo().setSlaveOk();
-
- printjson(result);
- sleep(5000);
-
- // FAIL! This never resyncs
- // now this should resync
- print("\nsync1.js ********************************************************************** part 11");
- var max0 = null;
- count = 0;
- do {
- try {
- max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch(e) {
- print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
- sleep(2000);
- continue;
- }
-
- printjson(max);
- printjson(max0);
- print("\nsync1.js part 11 waiting for match " + count + " " + Date() + " z[0]:" + max0.z + " z:" + max);
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- pause("fail part 11");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- print("||||| count:" + count);
- printjson(max0);
- } while (! max0 || max0.z != max);
-
- print("\nsync1.js ********************************************************************** part 12");
- pause("\nsync1.js success");
- replTest.stopSet(signal);
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("fail phase 1");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ } while (max1.z != max2.z);
+
+ // okay, now they're caught up. We have a max: max1.z
+
+ print("\nsync1.js ********************************************************************** part 10");
+
+ // now, let's see if rollback works
+ var result = dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: false });
+ dbs[0].getMongo().setSlaveOk();
+
+ printjson(result);
+ sleep(5000);
+
+ // now this should resync
+ print("\nsync1.js ********************************************************************** part 11");
+ var max0 = null;
+ count = 0;
+ do {
+ try {
+ max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
+ sleep(2000);
+ continue;
+ }
+
+ print("part 11");
+ if (max0) {
+ print("max0.z:" + max0.z);
+ print("max1.z:" + max1.z);
+ }
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("FAIL part 11");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ //print("||||| count:" + count);
+ //printjson(max0);
+ } while (!max0 || max0.z != max1.z);
+
+ print("\nsync1.js ********************************************************************** part 12");
+ pause("\nsync1.js success");
+ replTest.stopSet(signal);
}
if( 1 || debugging ) {
diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js
new file mode 100755
index 0000000..6ae8475
--- /dev/null
+++ b/jstests/replsets/two_initsync.js
@@ -0,0 +1,93 @@
+// test initial sync failing
+
+// try running as :
+//
+// mongo --nodb two_initsync.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("twoinitsync waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ sleep(1000);
+ }
+}
+
+doTest = function (signal) {
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 0 });
+
+ var first = replTest.add();
+
+ // Initiate replica set
+ assert.soon(function () {
+ var res = first.getDB("admin").runCommand({ replSetInitiate: null });
+ return res['ok'] == 1;
+ });
+
+ // Get status
+ assert.soon(function () {
+ var result = first.getDB("admin").runCommand({ replSetGetStatus: true });
+ return result['ok'] == 1;
+ });
+
+ var a = replTest.getMaster().getDB("two");
+ for (var i = 0; i < 20000; i++)
+ a.coll.insert({ i: i, s: "a b" });
+
+ // Start a second node
+ var second = replTest.add();
+
+ // Add the second node.
+ // This runs the equivalent of rs.add(newNode);
+ replTest.reInitiate();
+
+ var b = second.getDB("admin");
+
+ // attempt to interfere with the initial sync
+ b._adminCommand({ replSetTest: 1, forceInitialSyncFailure: 1 });
+
+ // wait(function () { return a._adminCommand("replSetGetStatus").members.length == 2; });
+
+ wait(function () { return b.isMaster().secondary || b.isMaster().ismaster; });
+
+ print("b.isMaster:");
+ printjson(b.isMaster());
+
+ second.setSlaveOk();
+
+ print("b.isMaster:");
+ printjson(b.isMaster());
+
+ wait(function () { var c = b.getSisterDB("two").coll.count(); print(c); return c == 20000; });
+
+ print("two_initsync.js SUCCESS");
+
+ replTest.stopSet(signal);
+}
+
+
+print("two_initsync.js");
+doTest( 15 );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 4ab75ee..a2a8197 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -64,7 +64,7 @@ killTime = (new Date()).getTime() - killTime.getTime()
print( "killTime: " + killTime );
assert.eq( 2 , state , "failed killing" );
-assert.gt( 3000 , killTime , "took too long to kill" )
+assert.gt( 10000 , killTime , "took too long to kill" )
join()
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 9f0cef4..86faedc 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -139,25 +139,25 @@ assert.eq( 0 , doCounts( "after dropDatabase called" ) )
s.adminCommand( { enablesharding : "test2" } );
s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
-a = s.getDB( "test2" ).foo;
-b = s2.getDB( "test2" ).foo;
-a.save( { num : 1 } );
-a.save( { num : 2 } );
-a.save( { num : 3 } );
-
+dba = s.getDB( "test2" );
+dbb = s2.getDB( "test2" );
+dba.foo.save( { num : 1 } );
+dba.foo.save( { num : 2 } );
+dba.foo.save( { num : 3 } );
+dba.getLastError();
assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
-assert.eq( 3 , a.count() , "Ba" );
-assert.eq( 3 , b.count() , "Bb" );
+assert.eq( 3 , dba.foo.count() , "Ba" );
+assert.eq( 3 , dbb.foo.count() , "Bb" );
s.adminCommand( { split : "test2.foo" , middle : { num : 2 } } );
s.adminCommand( { movechunk : "test2.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test2" ) ).name } );
assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
-x = a.stats()
+x = dba.foo.stats()
printjson( x )
-y = b.stats()
+y = dbb.foo.stats()
printjson( y )
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 8224de4..225639e 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.6.2
+Version: 1.6.3
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 50d4e76..cf1f992 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -62,7 +62,7 @@ namespace mongo {
void Chunk::setShard( const Shard& s ){
_shard = s;
_manager->_migrationNotification(this);
- _markModified();
+ _modified = true;
}
bool Chunk::contains( const BSONObj& obj ) const{
@@ -224,7 +224,7 @@ namespace mongo {
vector<ChunkPtr> newChunks;
vector<BSONObj>::const_iterator i = m.begin();
BSONObj nextPoint = i->getOwned();
- _markModified();
+ _modified = true;
do {
BSONObj splitPoint = nextPoint;
log(4) << "splitPoint: " << splitPoint << endl;
@@ -238,9 +238,9 @@ namespace mongo {
uasserted( 13395, ss.str() );
}
- ChunkPtr s( new Chunk( _manager, splitPoint , nextPoint , _shard) );
- s->_markModified();
- newChunks.push_back(s);
+ ChunkPtr c( new Chunk( _manager, splitPoint , nextPoint , _shard) );
+ c->_modified = true;
+ newChunks.push_back( c );
} while ( i != m.end() );
// Have the chunk manager reflect the key change for the first chunk and create an entry for every
@@ -439,6 +439,7 @@ namespace mongo {
<< "min" << getMin()
<< "max" << getMax()
<< "maxSize" << ( MaxChunkSize + 1 )
+ << "estimate" << true
) , result ) );
conn.done();
@@ -549,10 +550,6 @@ namespace mongo {
return o["lastmod"];
}
- void Chunk::_markModified(){
- _modified = true;
- }
-
string Chunk::toString() const {
stringstream ss;
ss << "ns:" << _manager->getns() << " at: " << _shard.toString() << " lastmod: " << _lastmod.toString() << " min: " << _min << " max: " << _max;
@@ -577,7 +574,7 @@ namespace mongo {
if ( _chunkMap.empty() ){
ChunkPtr c( new Chunk(this, _key.globalMin(), _key.globalMax(), config->getPrimary()) );
- c->_markModified();
+ c->setModified( true );
_chunkMap[c->getMax()] = c;
_chunkRanges.reloadAll(_chunkMap);
@@ -902,7 +899,7 @@ namespace mongo {
int numOps = 0;
for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
ChunkPtr c = i->second;
- if ( ! c->_modified )
+ if ( ! c->getModified() )
continue;
numOps++;
@@ -966,6 +963,7 @@ namespace mongo {
for ( unsigned i=0; i<toFix.size(); i++ ){
toFix[i]->_lastmod = newVersions[i];
+ toFix[i]->setModified( false );
}
massert( 10417 , "how did version get smalled" , getVersion_inlock() >= a );
@@ -1240,5 +1238,4 @@ namespace mongo {
return conn.runCommand( "admin" , cmd , result );
}
-
} // namespace mongo
diff --git a/s/chunk.h b/s/chunk.h
index b81b788..82f2300 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -127,8 +127,6 @@ namespace mongo {
void appendShortVersion( const char * name , BSONObjBuilder& b );
- void _markModified();
-
static int MaxChunkSize;
string genID() const;
@@ -136,7 +134,8 @@ namespace mongo {
const ChunkManager* getManager() const { return _manager; }
- bool modified();
+ bool getModified() { return _modified; }
+ void setModified( bool modified ) { _modified = modified; }
ShardChunkVersion getVersionOnConfigServer() const;
private:
diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index ddf83e8..62288ed 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -56,11 +56,11 @@ namespace mongo {
DbMessage d(m);
const char *ns = d.getns();
string errmsg;
- if ( shardVersionOk( ns , errmsg ) ){
+ if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ){
return false;
}
- log() << "shardVersionOk failed ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
+ log(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
if ( doesOpGetAResponse( op ) ){
assert( dbresponse );
diff --git a/s/d_logic.h b/s/d_logic.h
index 0f7ba35..a000f6b 100644
--- a/s/d_logic.h
+++ b/s/d_logic.h
@@ -155,7 +155,7 @@ namespace mongo {
/**
* @return true if the current threads shard version is ok, or not in sharded version
*/
- bool shardVersionOk( const string& ns , string& errmsg );
+ bool shardVersionOk( const string& ns , bool write , string& errmsg );
/**
* @return true if we took care of the message and nothing else should be done
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index fac63af..b8ee78e 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -107,7 +107,8 @@ namespace mongo {
int loops = 0;
Timer t;
- while ( t.seconds() < 600 ){ // 10 minutes
+ while ( t.seconds() < 900 ){ // 15 minutes
+ assert( dbMutex.getState() == 0 );
sleepmillis( 20 );
set<CursorId> now;
@@ -519,6 +520,7 @@ namespace mongo {
// 4.
for ( int i=0; i<86400; i++ ){ // don't want a single chunk move to take more than a day
+ assert( dbMutex.getState() == 0 );
sleepsecs( 1 );
ScopedDbConnection conn( to );
BSONObj res;
@@ -537,6 +539,8 @@ namespace mongo {
if ( res["state"].String() == "steady" )
break;
+
+ killCurrentOp.checkForInterrupt();
}
timing.done(4);
diff --git a/s/d_split.cpp b/s/d_split.cpp
index 7ae8384..fdefc7e 100644
--- a/s/d_split.cpp
+++ b/s/d_split.cpp
@@ -66,8 +66,22 @@ namespace mongo {
int num = 0;
NamespaceDetails *d = nsdetails(ns);
int idxNo = d->idxNo(*id);
- for( BtreeCursor c( d, idxNo, *id, min, max, false, 1 ); c.ok(); c.advance(), ++num );
+
+ // only yielding on firt half for now
+ // after this it should be in ram, so 2nd should be fast
+ {
+ shared_ptr<Cursor> c( new BtreeCursor( d, idxNo, *id, min, max, false, 1 ) );
+ scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ while ( c->ok() ){
+ num++;
+ c->advance();
+ if ( ! cc->yieldSometimes() )
+ break;
+ }
+ }
+
num /= 2;
+
BtreeCursor c( d, idxNo, *id, min, max, false, 1 );
for( ; num; c.advance(), --num );
diff --git a/s/d_state.cpp b/s/d_state.cpp
index 16f46cd..3f13b79 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -523,7 +523,7 @@ namespace mongo {
* @ return true if not in sharded mode
or if version for this client is ok
*/
- bool shardVersionOk( const string& ns , string& errmsg ){
+ bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ){
if ( ! shardingState.enabled() )
return true;
@@ -549,7 +549,7 @@ namespace mongo {
if ( version == 0 && clientVersion > 0 ){
stringstream ss;
- ss << "version: " << version << " clientVersion: " << clientVersion;
+ ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
errmsg = ss.str();
return false;
}
@@ -565,6 +565,13 @@ namespace mongo {
return false;
}
+ if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ){
+ // this means there was just a split
+ // since on a split w/o a migrate this server is ok
+ // going to accept write
+ return true;
+ }
+
stringstream ss;
ss << "your version is too old ns: " + ns << " global: " << version << " client: " << clientVersion;
errmsg = ss.str();
diff --git a/s/s_only.cpp b/s/s_only.cpp
index 74ee9c0..1f66e70 100644
--- a/s/s_only.cpp
+++ b/s/s_only.cpp
@@ -35,12 +35,13 @@ namespace mongo {
boost::thread_specific_ptr<Client> currentClient;
- Client::Client(const char *desc) :
+ Client::Client(const char *desc , MessagingPort *p) :
_context(0),
_shutdown(false),
_desc(desc),
_god(0),
- _lastOp(0)
+ _lastOp(0),
+ _mp(p)
{
}
Client::~Client(){}
diff --git a/s/shardconnection.cpp b/s/shardconnection.cpp
index 5d89b51..694693b 100644
--- a/s/shardconnection.cpp
+++ b/s/shardconnection.cpp
@@ -63,7 +63,12 @@ namespace mongo {
Status* ss = i->second;
assert( ss );
if ( ss->avail ){
- release( addr , ss->avail );
+ /* if we're shutting down, don't want to initiate release mechanism as it is slow,
+ and isn't needed since all connections will be closed anyway */
+ if ( inShutdown() )
+ delete ss->avail;
+ else
+ release( addr , ss->avail );
ss->avail = 0;
}
delete ss;
@@ -146,10 +151,18 @@ namespace mongo {
void release( const string& addr , DBClientBase * conn ){
resetShardVersion( conn );
BSONObj res;
- if ( conn->simpleCommand( "admin" , &res , "unsetSharding" ) )
- pool.release( addr , conn );
- else {
- log(LL_ERROR) << " couldn't unset sharding :( " << res << endl;
+
+ try {
+ if ( conn->simpleCommand( "admin" , &res , "unsetSharding" ) ){
+ pool.release( addr , conn );
+ }
+ else {
+ log(LL_ERROR) << " couldn't unset sharding :( " << res << endl;
+ delete conn;
+ }
+ }
+ catch ( std::exception& e ){
+ log(LL_ERROR) << "couldn't unsert sharding : " << e.what() << endl;
delete conn;
}
}
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 91759fd..144bf79 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -196,7 +196,7 @@ namespace mongo {
if ( multi ){
}
else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ){
- throw UserException( 8013 , "can't do update with query that doesn't have the shard key" );
+ throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
}
else {
save = true;
@@ -282,7 +282,7 @@ namespace mongo {
if ( left <= 0 )
throw e;
left--;
- log() << "update failed b/c of StaleConfigException, retrying "
+ log() << "delete failed b/c of StaleConfigException, retrying "
<< " left:" << left << " ns: " << r.getns() << " patt: " << pattern << endl;
r.reset( false );
shards.clear();
diff --git a/s/util.h b/s/util.h
index 8d78fe8..7695eda 100644
--- a/s/util.h
+++ b/s/util.h
@@ -87,6 +87,9 @@ namespace mongo {
ss << _major << "|" << _minor;
return ss.str();
}
+
+ int majorVersion() const { return _major; }
+ int minorVersion() const { return _minor; }
operator unsigned long long() const { return _combined; }
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index 9e20a3a..da108c6 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -305,7 +305,9 @@ namespace mongo {
_real = 0;
}
else {
- log() << "warning: scopeCache is empty!" << endl;
+ // this means that the Scope was killed from a different thread
+ // for example a cursor got timed out that has a $where clause
+ log(3) << "warning: scopeCache is empty!" << endl;
delete _real;
_real = 0;
}
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index 22102ba..c8f2eca 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -670,7 +670,8 @@ namespace mongo {
CHECKNEWOBJECT(o,_context,"Bindata_BinData1");
int len;
const char * data = e.binData( len );
- assert( JS_SetPrivate( _context , o , new BinDataHolder( data ) ) );
+ assert( data );
+ assert( JS_SetPrivate( _context , o , new BinDataHolder( data , len ) ) );
setProperty( o , "len" , toval( (double)len ) );
setProperty( o , "type" , toval( (double)e.binDataType() ) );
diff --git a/scripting/sm_db.cpp b/scripting/sm_db.cpp
index 940d785..8ba612b 100644
--- a/scripting/sm_db.cpp
+++ b/scripting/sm_db.cpp
@@ -95,7 +95,13 @@ namespace mongo {
JSBool internal_cursor_hasNext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
DBClientCursor *cursor = getCursor( cx, obj );
- *rval = cursor->more() ? JSVAL_TRUE : JSVAL_FALSE;
+ try {
+ *rval = cursor->more() ? JSVAL_TRUE : JSVAL_FALSE;
+ }
+ catch ( std::exception& e ){
+ JS_ReportError( cx , e.what() );
+ return JS_FALSE;
+ }
return JS_TRUE;
}
@@ -108,13 +114,23 @@ namespace mongo {
JSBool internal_cursor_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
DBClientCursor *cursor = getCursor( cx, obj );
- if ( ! cursor->more() ){
- JS_ReportError( cx , "cursor at the end" );
+
+ BSONObj n;
+
+ try {
+ if ( ! cursor->more() ){
+ JS_ReportError( cx , "cursor at the end" );
+ return JS_FALSE;
+ }
+
+ n = cursor->next();
+ }
+ catch ( std::exception& e ){
+ JS_ReportError( cx , e.what() );
return JS_FALSE;
}
- Convertor c(cx);
- BSONObj n = cursor->next();
+ Convertor c(cx);
*rval = c.toval( &n );
return JS_TRUE;
}
@@ -310,7 +326,7 @@ namespace mongo {
}
JSBool mongo_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- smuassert( cx , "mongo_remove needs 2 arguments" , argc == 2 );
+ smuassert( cx , "mongo_remove needs 2 or 3 arguments" , argc == 2 || argc == 3 );
smuassert( cx , "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
Convertor c( cx );
@@ -324,9 +340,12 @@ namespace mongo {
string ns = c.toString( argv[0] );
BSONObj o = c.toObject( argv[1] );
-
+ bool justOne = false;
+ if ( argc > 2 )
+ justOne = c.toBoolean( argv[2] );
+
try {
- conn->remove( ns , o );
+ conn->remove( ns , o , justOne );
return JS_TRUE;
}
catch ( ... ){
@@ -861,12 +880,14 @@ namespace mongo {
JSBool numberlong_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
Convertor c(cx);
stringstream ss;
- if ( c.hasProperty( obj, "top" ) ) {
- long long val = c.toNumberLongUnsafe( obj );
- ss << "NumberLong( \"" << val << "\" )";
- } else {
- ss << "NumberLong( " << c.getNumber( obj, "floatApprox" ) << " )";
- }
+ long long val = c.toNumberLongUnsafe( obj );
+ const long long limit = 2LL << 30;
+
+ if ( val <= -limit || limit <= val )
+ ss << "NumberLong(\"" << val << "\")";
+ else
+ ss << "NumberLong(" << val << ")";
+
string ret = ss.str();
return *rval = c.toval( ret.c_str() );
}
diff --git a/scripting/v8_db.cpp b/scripting/v8_db.cpp
index 5752fde..e178875 100644
--- a/scripting/v8_db.cpp
+++ b/scripting/v8_db.cpp
@@ -297,7 +297,7 @@ namespace mongo {
}
v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args){
- jsassert( args.Length() == 2 , "remove needs 2 args" );
+ jsassert( args.Length() == 2 || args.Length() == 3 , "remove needs 2 args" );
jsassert( args[1]->IsObject() , "have to remove an object template" );
DBClientBase * conn = getConnection( args );
@@ -306,10 +306,15 @@ namespace mongo {
v8::Handle<v8::Object> in = args[1]->ToObject();
BSONObj o = v8ToMongo( in );
+ bool justOne = false;
+ if ( args.Length() > 2 ){
+ justOne = args[2]->BooleanValue();
+ }
+
DDD( "want to remove : " << o.jsonString() );
try {
v8::Unlocker u;
- conn->remove( ns , o );
+ conn->remove( ns , o , justOne );
}
catch ( ... ){
return v8::ThrowException( v8::String::New( "socket error on remove" ) );
@@ -714,11 +719,14 @@ namespace mongo {
v8::Handle<v8::Object> it = args.This();
stringstream ss;
- if ( !it->Has( v8::String::New( "top" ) ) ) {
- ss << "NumberLong( " << it->Get( v8::String::New( "floatApprox" ) )->NumberValue() << " )";
- } else {
- ss << "NumberLong( \"" << numberLongVal( it ) << "\" )";
- }
+ long long val = numberLongVal( it );
+ const long long limit = 2LL << 30;
+
+ if ( val <= -limit || limit <= val )
+ ss << "NumberLong(\"" << val << "\")";
+ else
+ ss << "NumberLong(" << val << ")";
+
string ret = ss.str();
return v8::String::New( ret.c_str() );
}
diff --git a/shell/collection.js b/shell/collection.js
index 68ee03d..dfbb6a1 100644
--- a/shell/collection.js
+++ b/shell/collection.js
@@ -68,6 +68,9 @@ DBCollection.prototype.help = function () {
DBCollection.prototype.getFullName = function(){
return this._fullName;
}
+DBCollection.prototype.getMongo = function(){
+ return this._db.getMongo();
+}
DBCollection.prototype.getDB = function(){
return this._db;
}
@@ -170,8 +173,8 @@ DBCollection.prototype.insert = function( obj , _allow_dot ){
this._lastID = obj._id;
}
-DBCollection.prototype.remove = function( t ){
- this._mongo.remove( this._fullName , this._massageObject( t ) );
+DBCollection.prototype.remove = function( t , justOne ){
+ this._mongo.remove( this._fullName , this._massageObject( t ) , justOne ? true : false );
}
DBCollection.prototype.update = function( query , obj , upsert , multi ){
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index 06384ca..8b23ef1 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -1,1766 +1,1766 @@
-const char * jsconcatcode =
-"__quiet = false;\n"
- "__magicNoPrint = { __magicNoPrint : 1111 }\n"
- "chatty = function(s){\n"
- "if ( ! __quiet )\n"
- "print( s );}\n"
- "friendlyEqual = function( a , b ){\n"
- "if ( a == b )\n"
- "return true;\n"
- "if ( tojson( a ) == tojson( b ) )\n"
- "return true;\n"
- "return false;}\n"
- "doassert = function (msg) {\n"
- "if (msg.indexOf(\"assert\") == 0)\n"
- "print(msg);\n"
- "else\n"
- "print(\"assert: \" + msg);\n"
- "throw msg;}\n"
- "assert = function( b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( b )\n"
- "return;\n"
- "doassert( msg == undefined ? \"assert failed\" : \"assert failed : \" + msg );}\n"
- "assert.automsg = function( b ) {\n"
- "assert( eval( b ), b );}\n"
- "assert._debug = false;\n"
- "assert.eq = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a == b )\n"
- "return;\n"
- "if ( ( a != null && b != null ) && friendlyEqual( a , b ) )\n"
- "return;\n"
- "doassert( \"[\" + tojson( a ) + \"] != [\" + tojson( b ) + \"] are not equal : \" + msg );}\n"
- "assert.eq.automsg = function( a, b ) {\n"
- "assert.eq( eval( a ), eval( b ), \"[\" + a + \"] != [\" + b + \"]\" );}\n"
- "assert.neq = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a != b )\n"
- "return;\n"
- "doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );}\n"
- "assert.repeat = function( f, msg, timeout, interval ) {\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "var start = new Date();\n"
- "timeout = timeout || 30000;\n"
- "interval = interval || 200;\n"
- "var last;\n"
- "while( 1 ) {\n"
- "if ( typeof( f ) == \"string\" ){\n"
- "if ( eval( f ) )\n"
- "return;}\n"
- "else {\n"
- "if ( f() )\n"
- "return;}\n"
- "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
- "break;\n"
- "sleep( interval );}}\n"
- "assert.soon = function( f, msg, timeout, interval ) {\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "var start = new Date();\n"
- "timeout = timeout || 30000;\n"
- "interval = interval || 200;\n"
- "var last;\n"
- "while( 1 ) {\n"
- "if ( typeof( f ) == \"string\" ){\n"
- "if ( eval( f ) )\n"
- "return;}\n"
- "else {\n"
- "if ( f() )\n"
- "return;}\n"
- "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
- "doassert( \"assert.soon failed: \" + f + \", msg:\" + msg );\n"
- "sleep( interval );}}\n"
- "assert.throws = function( func , params , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "try {\n"
- "func.apply( null , params );}\n"
- "catch ( e ){\n"
- "return e;}\n"
- "doassert( \"did not throw exception: \" + msg );}\n"
- "assert.throws.automsg = function( func, params ) {\n"
- "assert.throws( func, params, func.toString() );}\n"
- "assert.commandWorked = function( res , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( res.ok == 1 )\n"
- "return;\n"
- "doassert( \"command failed: \" + tojson( res ) + \" : \" + msg );}\n"
- "assert.commandFailed = function( res , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( res.ok == 0 )\n"
- "return;\n"
- "doassert( \"command worked when it should have failed: \" + tojson( res ) + \" : \" + msg );}\n"
- "assert.isnull = function( what , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( what == null )\n"
- "return;\n"
- "doassert( \"supposed to null (\" + ( msg || \"\" ) + \") was: \" + tojson( what ) );}\n"
- "assert.lt = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a < b )\n"
- "return;\n"
- "doassert( a + \" is not less than \" + b + \" : \" + msg );}\n"
- "assert.gt = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a > b )\n"
- "return;\n"
- "doassert( a + \" is not greater than \" + b + \" : \" + msg );}\n"
- "assert.lte = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a <= b )\n"
- "return;\n"
- "doassert( a + \" is not less than or eq \" + b + \" : \" + msg );}\n"
- "assert.gte = function( a , b , msg ){\n"
- "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
- "if ( a >= b )\n"
- "return;\n"
- "doassert( a + \" is not greater than or eq \" + b + \" : \" + msg );}\n"
- "assert.close = function( a , b , msg , places ){\n"
- "if (places === undefined) {\n"
- "places = 4;}\n"
- "if (Math.round((a - b) * Math.pow(10, places)) === 0) {\n"
- "return;}\n"
- "doassert( a + \" is not equal to \" + b + \" within \" + places +\n"
- "\" places, diff: \" + (a-b) + \" : \" + msg );\n"
- "};\n"
- "Object.extend = function( dst , src , deep ){\n"
- "for ( var k in src ){\n"
- "var v = src[k];\n"
- "if ( deep && typeof(v) == \"object\" ){\n"
- "v = Object.extend( typeof ( v.length ) == \"number\" ? [] : {} , v , true );}\n"
- "dst[k] = v;}\n"
- "return dst;}\n"
- "argumentsToArray = function( a ){\n"
- "var arr = [];\n"
- "for ( var i=0; i<a.length; i++ )\n"
- "arr[i] = a[i];\n"
- "return arr;}\n"
- "isString = function( x ){\n"
- "return typeof( x ) == \"string\";}\n"
- "isNumber = function(x){\n"
- "return typeof( x ) == \"number\";}\n"
- "isObject = function( x ){\n"
- "return typeof( x ) == \"object\";}\n"
- "String.prototype.trim = function() {\n"
- "return this.replace(/^\\s+|\\s+$/g,\"\");}\n"
- "String.prototype.ltrim = function() {\n"
- "return this.replace(/^\\s+/,\"\");}\n"
- "String.prototype.rtrim = function() {\n"
- "return this.replace(/\\s+$/,\"\");}\n"
- "Date.timeFunc = function( theFunc , numTimes ){\n"
- "var start = new Date();\n"
- "numTimes = numTimes || 1;\n"
- "for ( var i=0; i<numTimes; i++ ){\n"
- "theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );}\n"
- "return (new Date()).getTime() - start.getTime();}\n"
- "Date.prototype.tojson = function(){\n"
- "return \"\\\"\" + this.toString() + \"\\\"\";}\n"
- "RegExp.prototype.tojson = RegExp.prototype.toString;\n"
- "Array.contains = function( a , x ){\n"
- "for ( var i=0; i<a.length; i++ ){\n"
- "if ( a[i] == x )\n"
- "return true;}\n"
- "return false;}\n"
- "Array.unique = function( a ){\n"
- "var u = [];\n"
- "for ( var i=0; i<a.length; i++){\n"
- "var o = a[i];\n"
- "if ( ! Array.contains( u , o ) ){\n"
- "u.push( o );}}\n"
- "return u;}\n"
- "Array.shuffle = function( arr ){\n"
- "for ( var i=0; i<arr.length-1; i++ ){\n"
- "var pos = i+Random.randInt(arr.length-i);\n"
- "var save = arr[i];\n"
- "arr[i] = arr[pos];\n"
- "arr[pos] = save;}\n"
- "return arr;}\n"
- "Array.tojson = function( a , indent ){\n"
- "if (!indent)\n"
- "indent = \"\";\n"
- "if (a.length == 0) {\n"
- "return \"[ ]\";}\n"
- "var s = \"[\\n\";\n"
- "indent += \"\\t\";\n"
- "for ( var i=0; i<a.length; i++){\n"
- "s += indent + tojson( a[i], indent );\n"
- "if ( i < a.length - 1 ){\n"
- "s += \",\\n\";}}\n"
- "if ( a.length == 0 ) {\n"
- "s += indent;}\n"
- "indent = indent.substring(1);\n"
- "s += \"\\n\"+indent+\"]\";\n"
- "return s;}\n"
- "Array.fetchRefs = function( arr , coll ){\n"
- "var n = [];\n"
- "for ( var i=0; i<arr.length; i ++){\n"
- "var z = arr[i];\n"
- "if ( coll && coll != z.getCollection() )\n"
- "continue;\n"
- "n.push( z.fetch() );}\n"
- "return n;}\n"
- "Array.sum = function( arr ){\n"
- "if ( arr.length == 0 )\n"
- "return null;\n"
- "var s = arr[0];\n"
- "for ( var i=1; i<arr.length; i++ )\n"
- "s += arr[i];\n"
- "return s;}\n"
- "Array.avg = function( arr ){\n"
- "if ( arr.length == 0 )\n"
- "return null;\n"
- "return Array.sum( arr ) / arr.length;}\n"
- "Array.stdDev = function( arr ){\n"
- "var avg = Array.avg( arr );\n"
- "var sum = 0;\n"
- "for ( var i=0; i<arr.length; i++ ){\n"
- "sum += Math.pow( arr[i] - avg , 2 );}\n"
- "return Math.sqrt( sum / arr.length );}\n"
- "Object.keySet = function( o ) {\n"
- "var ret = new Array();\n"
- "for( i in o ) {\n"
- "if ( !( i in o.__proto__ && o[ i ] === o.__proto__[ i ] ) ) {\n"
- "ret.push( i );}}\n"
- "return ret;}\n"
- "if ( ! NumberLong.prototype ) {\n"
- "NumberLong.prototype = {}}\n"
- "NumberLong.prototype.tojson = function() {\n"
- "return this.toString();}\n"
- "if ( ! ObjectId.prototype )\n"
- "ObjectId.prototype = {}\n"
- "ObjectId.prototype.toString = function(){\n"
- "return this.str;}\n"
- "ObjectId.prototype.tojson = function(){\n"
- "return \"ObjectId(\\\"\" + this.str + \"\\\")\";}\n"
- "ObjectId.prototype.isObjectId = true;\n"
- "ObjectId.prototype.getTimestamp = function(){\n"
- "return new Date(parseInt(this.toString().slice(0,8), 16)*1000);}\n"
- "ObjectId.prototype.equals = function( other){\n"
- "return this.str == other.str;}\n"
- "if ( typeof( DBPointer ) != \"undefined\" ){\n"
- "DBPointer.prototype.fetch = function(){\n"
- "assert( this.ns , \"need a ns\" );\n"
- "assert( this.id , \"need an id\" );\n"
- "return db[ this.ns ].findOne( { _id : this.id } );}\n"
- "DBPointer.prototype.tojson = function(indent){\n"
- "return tojson({\"ns\" : this.ns, \"id\" : this.id}, indent);}\n"
- "DBPointer.prototype.getCollection = function(){\n"
- "return this.ns;}\n"
- "DBPointer.prototype.toString = function(){\n"
- "return \"DBPointer \" + this.ns + \":\" + this.id;}}\n"
- "else {\n"
- "print( \"warning: no DBPointer\" );}\n"
- "if ( typeof( DBRef ) != \"undefined\" ){\n"
- "DBRef.prototype.fetch = function(){\n"
- "assert( this.$ref , \"need a ns\" );\n"
- "assert( this.$id , \"need an id\" );\n"
- "return db[ this.$ref ].findOne( { _id : this.$id } );}\n"
- "DBRef.prototype.tojson = function(indent){\n"
- "return tojson({\"$ref\" : this.$ref, \"$id\" : this.$id}, indent);}\n"
- "DBRef.prototype.getCollection = function(){\n"
- "return this.$ref;}\n"
- "DBRef.prototype.toString = function(){\n"
- "return this.tojson();}}\n"
- "else {\n"
- "print( \"warning: no DBRef\" );}\n"
- "if ( typeof( BinData ) != \"undefined\" ){\n"
- "BinData.prototype.tojson = function () {\n"
- "//return \"BinData type: \" + this.type + \" len: \" + this.len;\n"
- "return this.toString();}}\n"
- "else {\n"
- "print( \"warning: no BinData class\" );}\n"
- "if ( typeof( UUID ) != \"undefined\" ){\n"
- "UUID.prototype.tojson = function () {\n"
- "return this.toString();}}\n"
- "if ( typeof _threadInject != \"undefined\" ){\n"
- "print( \"fork() available!\" );\n"
- "Thread = function(){\n"
- "this.init.apply( this, arguments );}\n"
- "_threadInject( Thread.prototype );\n"
- "ScopedThread = function() {\n"
- "this.init.apply( this, arguments );}\n"
- "ScopedThread.prototype = new Thread( function() {} );\n"
- "_scopedThreadInject( ScopedThread.prototype );\n"
- "fork = function() {\n"
- "var t = new Thread( function() {} );\n"
- "Thread.apply( t, arguments );\n"
- "return t;}\n"
- "EventGenerator = function( me, collectionName, mean ) {\n"
- "this.mean = mean;\n"
- "this.events = new Array( me, collectionName );}\n"
- "EventGenerator.prototype._add = function( action ) {\n"
- "this.events.push( [ Random.genExp( this.mean ), action ] );}\n"
- "EventGenerator.prototype.addInsert = function( obj ) {\n"
- "this._add( \"t.insert( \" + tojson( obj ) + \" )\" );}\n"
- "EventGenerator.prototype.addRemove = function( obj ) {\n"
- "this._add( \"t.remove( \" + tojson( obj ) + \" )\" );}\n"
- "EventGenerator.prototype.addUpdate = function( objOld, objNew ) {\n"
- "this._add( \"t.update( \" + tojson( objOld ) + \", \" + tojson( objNew ) + \" )\" );}\n"
- "EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {\n"
- "query = query || {};\n"
- "shouldPrint = shouldPrint || false;\n"
- "checkQuery = checkQuery || false;\n"
- "var action = \"assert.eq( \" + count + \", t.count( \" + tojson( query ) + \" ) );\"\n"
- "if ( checkQuery ) {\n"
- "action += \" assert.eq( \" + count + \", t.find( \" + tojson( query ) + \" ).toArray().length );\"}\n"
- "if ( shouldPrint ) {\n"
- "action += \" print( me + ' ' + \" + count + \" );\";}\n"
- "this._add( action );}\n"
- "EventGenerator.prototype.getEvents = function() {\n"
- "return this.events;}\n"
- "EventGenerator.dispatch = function() {\n"
- "var args = argumentsToArray( arguments );\n"
- "var me = args.shift();\n"
- "var collectionName = args.shift();\n"
- "var m = new Mongo( db.getMongo().host );\n"
- "var t = m.getDB( \"test\" )[ collectionName ];\n"
- "for( var i in args ) {\n"
- "sleep( args[ i ][ 0 ] );\n"
- "eval( args[ i ][ 1 ] );}}\n"
- "ParallelTester = function() {\n"
- "this.params = new Array();}\n"
- "ParallelTester.prototype.add = function( fun, args ) {\n"
- "args = args || [];\n"
- "args.unshift( fun );\n"
- "this.params.push( args );}\n"
- "ParallelTester.prototype.run = function( msg, newScopes ) {\n"
- "newScopes = newScopes || false;\n"
- "assert.parallelTests( this.params, msg, newScopes );}\n"
- "ParallelTester.createJstestsLists = function( n ) {\n"
- "var params = new Array();\n"
- "for( var i = 0; i < n; ++i ) {\n"
- "params.push( [] );}\n"
- "var makeKeys = function( a ) {\n"
- "var ret = {};\n"
- "for( var i in a ) {\n"
- "ret[ a[ i ] ] = 1;}\n"
- "return ret;}\n"
- "var skipTests = makeKeys( [ \"jstests/dbadmin.js\",\n"
- "\"jstests/repair.js\",\n"
- "\"jstests/cursor8.js\",\n"
- "\"jstests/recstore.js\",\n"
- "\"jstests/extent.js\",\n"
- "\"jstests/indexb.js\",\n"
- "\"jstests/profile1.js\",\n"
- "\"jstests/mr3.js\",\n"
- "\"jstests/indexh.js\",\n"
- "\"jstests/apitest_db.js\",\n"
- "\"jstests/evalb.js\"] );\n"
- "var serialTestsArr = [ \"jstests/fsync.js\",\n"
- "\"jstests/fsync2.js\" ];\n"
- "var serialTests = makeKeys( serialTestsArr );\n"
- "params[ 0 ] = serialTestsArr;\n"
- "var files = listFiles(\"jstests\");\n"
- "files = Array.shuffle( files );\n"
- "var i = 0;\n"
- "files.forEach(\n"
- "function(x) {\n"
- "if ( ( /[\\/\\\\]_/.test(x.name) ) ||\n"
- "( ! /\\.js$/.test(x.name ) ) ||\n"
- "( x.name in skipTests ) ||\n"
- "( x.name in serialTests ) ||\n"
- "! /\\.js$/.test(x.name ) ){\n"
- "print(\" >>>>>>>>>>>>>>> skipping \" + x.name);\n"
- "return;}\n"
- "params[ i % n ].push( x.name );\n"
- "++i;}\n"
- ");\n"
- "params[ 0 ] = Array.shuffle( params[ 0 ] );\n"
- "for( var i in params ) {\n"
- "params[ i ].unshift( i );}\n"
- "return params;}\n"
- "ParallelTester.fileTester = function() {\n"
- "var args = argumentsToArray( arguments );\n"
- "var suite = args.shift();\n"
- "args.forEach(\n"
- "function( x ) {\n"
- "print(\" S\" + suite + \" Test : \" + x + \" ...\");\n"
- "var time = Date.timeFunc( function() { load(x); }, 1);\n"
- "print(\" S\" + suite + \" Test : \" + x + \" \" + time + \"ms\" );}\n"
- ");}\n"
- "assert.parallelTests = function( params, msg, newScopes ) {\n"
- "newScopes = newScopes || false;\n"
- "var wrapper = function( fun, argv ) {\n"
- "eval (\n"
- "\"var z = function() {\" +\n"
- "\"var __parallelTests__fun = \" + fun.toString() + \";\" +\n"
- "\"var __parallelTests__argv = \" + tojson( argv ) + \";\" +\n"
- "\"var __parallelTests__passed = false;\" +\n"
- "\"try {\" +\n"
- "\"__parallelTests__fun.apply( 0, __parallelTests__argv );\" +\n"
- "\"__parallelTests__passed = true;\" +\n"
- "\"} catch ( e ) {\" +\n"
- "\"print( e );\" +\n"
- "\"}\" +\n"
- "\"return __parallelTests__passed;\" +\n"
- "\"}\"\n"
- ");\n"
- "return z;}\n"
- "var runners = new Array();\n"
- "for( var i in params ) {\n"
- "var param = params[ i ];\n"
- "var test = param.shift();\n"
- "var t;\n"
- "if ( newScopes )\n"
- "t = new ScopedThread( wrapper( test, param ) );\n"
- "else\n"
- "t = new Thread( wrapper( test, param ) );\n"
- "runners.push( t );}\n"
- "runners.forEach( function( x ) { x.start(); } );\n"
- "var nFailed = 0;\n"
- "runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );\n"
- "assert.eq( 0, nFailed, msg );}}\n"
- "tojsononeline = function( x ){\n"
- "return tojson( x , \" \" , true );}\n"
- "tojson = function( x, indent , nolint ){\n"
- "if ( x === null )\n"
- "return \"null\";\n"
- "if ( x === undefined )\n"
- "return \"undefined\";\n"
- "if (!indent)\n"
- "indent = \"\";\n"
- "switch ( typeof x ) {\n"
- "case \"string\": {\n"
- "var s = \"\\\"\";\n"
- "for ( var i=0; i<x.length; i++ ){\n"
- "switch (x[i]){\n"
- "case '\"': s += '\\\\\"'; break;\n"
- "case '\\\\': s += '\\\\\\\\'; break;\n"
- "case '\\b': s += '\\\\b'; break;\n"
- "case '\\f': s += '\\\\f'; break;\n"
- "case '\\n': s += '\\\\n'; break;\n"
- "case '\\r': s += '\\\\r'; break;\n"
- "case '\\t': s += '\\\\t'; break;\n"
- "default: {\n"
- "var code = x.charCodeAt(i);\n"
- "if (code < 0x20){\n"
- "s += (code < 0x10 ? '\\\\u000' : '\\\\u00') + code.toString(16);\n"
- "} else {\n"
- "s += x[i];}}}}\n"
- "return s + \"\\\"\";}\n"
- "case \"number\":\n"
- "case \"boolean\":\n"
- "return \"\" + x;\n"
- "case \"object\":{\n"
- "var s = tojsonObject( x, indent , nolint );\n"
- "if ( ( nolint == null || nolint == true ) && s.length < 80 && ( indent == null || indent.length == 0 ) ){\n"
- "s = s.replace( /[\\s\\r\\n ]+/gm , \" \" );}\n"
- "return s;}\n"
- "case \"function\":\n"
- "return x.toString();\n"
- "default:\n"
- "throw \"tojson can't handle type \" + ( typeof x );}}\n"
- "tojsonObject = function( x, indent , nolint ){\n"
- "var lineEnding = nolint ? \" \" : \"\\n\";\n"
- "var tabSpace = nolint ? \"\" : \"\\t\";\n"
- "assert.eq( ( typeof x ) , \"object\" , \"tojsonObject needs object, not [\" + ( typeof x ) + \"]\" );\n"
- "if (!indent)\n"
- "indent = \"\";\n"
- "if ( typeof( x.tojson ) == \"function\" && x.tojson != tojson ) {\n"
- "return x.tojson(indent,nolint);}\n"
- "if ( x.constructor && typeof( x.constructor.tojson ) == \"function\" && x.constructor.tojson != tojson ) {\n"
- "return x.constructor.tojson( x, indent , nolint );}\n"
- "if ( x.toString() == \"[object MaxKey]\" )\n"
- "return \"{ $maxKey : 1 }\";\n"
- "if ( x.toString() == \"[object MinKey]\" )\n"
- "return \"{ $minKey : 1 }\";\n"
- "var s = \"{\" + lineEnding;\n"
- "indent += tabSpace;\n"
- "var total = 0;\n"
- "for ( var k in x ) total++;\n"
- "if ( total == 0 ) {\n"
- "s += indent + lineEnding;}\n"
- "var keys = x;\n"
- "if ( typeof( x._simpleKeys ) == \"function\" )\n"
- "keys = x._simpleKeys();\n"
- "var num = 1;\n"
- "for ( var k in keys ){\n"
- "var val = x[k];\n"
- "if ( val == DB.prototype || val == DBCollection.prototype )\n"
- "continue;\n"
- "s += indent + \"\\\"\" + k + \"\\\" : \" + tojson( val, indent , nolint );\n"
- "if (num != total) {\n"
- "s += \",\";\n"
- "num++;}\n"
- "s += lineEnding;}\n"
- "indent = indent.substring(1);\n"
- "return s + indent + \"}\";}\n"
- "shellPrint = function( x ){\n"
- "it = x;\n"
- "if ( x != undefined )\n"
- "shellPrintHelper( x );\n"
- "if ( db ){\n"
- "var e = db.getPrevError();\n"
- "if ( e.err ) {\n"
- "if( e.nPrev <= 1 )\n"
- "print( \"error on last call: \" + tojson( e.err ) );\n"
- "else\n"
- "print( \"an error \" + tojson(e.err) + \" occurred \" + e.nPrev + \" operations back in the command invocation\" );}\n"
- "db.resetError();}}\n"
- "printjson = function(x){\n"
- "print( tojson( x ) );}\n"
- "printjsononeline = function(x){\n"
- "print( tojsononeline( x ) );}\n"
- "shellPrintHelper = function( x ){\n"
- "if ( typeof( x ) == \"undefined\" ){\n"
- "if ( typeof( db ) != \"undefined\" && db.getLastError ){\n"
- "var e = db.getLastError();\n"
- "if ( e != null )\n"
- "print( e );}\n"
- "return;}\n"
- "if ( x == __magicNoPrint )\n"
- "return;\n"
- "if ( x == null ){\n"
- "print( \"null\" );\n"
- "return;}\n"
- "if ( typeof x != \"object\" )\n"
- "return print( x );\n"
- "var p = x.shellPrint;\n"
- "if ( typeof p == \"function\" )\n"
- "return x.shellPrint();\n"
- "var p = x.tojson;\n"
- "if ( typeof p == \"function\" )\n"
- "print( x.tojson() );\n"
- "else\n"
- "print( tojson( x ) );}\n"
- "shellAutocomplete = function( prefix ){\n"
- "var a = [];\n"
- "//a.push( prefix + \"z\" )\n"
- "//a.push( prefix + \"y\" )\n"
- "__autocomplete__ = a;}\n"
- "shellHelper = function( command , rest , shouldPrint ){\n"
- "command = command.trim();\n"
- "var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
- "if ( ! shellHelper[command] )\n"
- "throw \"no command [\" + command + \"]\";\n"
- "var res = shellHelper[command].apply( null , args );\n"
- "if ( shouldPrint ){\n"
- "shellPrintHelper( res );}\n"
- "return res;}\n"
- "shellHelper.use = function( dbname ){\n"
- "db = db.getMongo().getDB( dbname );\n"
- "print( \"switched to db \" + db.getName() );}\n"
- "shellHelper.it = function(){\n"
- "if ( typeof( ___it___ ) == \"undefined\" || ___it___ == null ){\n"
- "print( \"no cursor\" );\n"
- "return;}\n"
- "shellPrintHelper( ___it___ );}\n"
- "shellHelper.show = function( what ){\n"
- "assert( typeof what == \"string\" );\n"
- "if( what == \"profile\" ) {\n"
- "if( db.system.profile.count() == 0 ) {\n"
- "print(\"db.system.profile is empty\");\n"
- "print(\"Use db.setProfilingLevel(2) will enable profiling\");\n"
- "print(\"Use db.system.profile.find() to show raw profile entries\");}\n"
- "else {\n"
- "print();\n"
- "db.system.profile.find({ millis : { $gt : 0 } }).sort({$natural:-1}).limit(5).forEach( function(x){print(\"\"+x.millis+\"ms \" + String(x.ts).substring(0,24)); print(x.info); print(\"\\n\");} )}\n"
- "return \"\";}\n"
- "if ( what == \"users\" ){\n"
- "db.system.users.find().forEach( printjson );\n"
- "return \"\";}\n"
- "if ( what == \"collections\" || what == \"tables\" ) {\n"
- "db.getCollectionNames().forEach( function(x){print(x)} );\n"
- "return \"\";}\n"
- "if ( what == \"dbs\" ) {\n"
- "db.getMongo().getDBNames().sort().forEach( function(x){print(x)} );\n"
- "return \"\";}\n"
- "throw \"don't know how to show [\" + what + \"]\";}\n"
- "if ( typeof( Map ) == \"undefined\" ){\n"
- "Map = function(){\n"
- "this._data = {};}}\n"
- "Map.hash = function( val ){\n"
- "if ( ! val )\n"
- "return val;\n"
- "switch ( typeof( val ) ){\n"
- "case 'string':\n"
- "case 'number':\n"
- "case 'date':\n"
- "return val.toString();\n"
- "case 'object':\n"
- "case 'array':\n"
- "var s = \"\";\n"
- "for ( var k in val ){\n"
- "s += k + val[k];}\n"
- "return s;}\n"
- "throw \"can't hash : \" + typeof( val );}\n"
- "Map.prototype.put = function( key , value ){\n"
- "var o = this._get( key );\n"
- "var old = o.value;\n"
- "o.value = value;\n"
- "return old;}\n"
- "Map.prototype.get = function( key ){\n"
- "return this._get( key ).value;}\n"
- "Map.prototype._get = function( key ){\n"
- "var h = Map.hash( key );\n"
- "var a = this._data[h];\n"
- "if ( ! a ){\n"
- "a = [];\n"
- "this._data[h] = a;}\n"
- "for ( var i=0; i<a.length; i++ ){\n"
- "if ( friendlyEqual( key , a[i].key ) ){\n"
- "return a[i];}}\n"
- "var o = { key : key , value : null };\n"
- "a.push( o );\n"
- "return o;}\n"
- "Map.prototype.values = function(){\n"
- "var all = [];\n"
- "for ( var k in this._data ){\n"
- "this._data[k].forEach( function(z){ all.push( z.value ); } );}\n"
- "return all;}\n"
- "if ( typeof( gc ) == \"undefined\" ){\n"
- "gc = function(){\n"
- "print( \"warning: using noop gc()\" );}}\n"
- "Math.sigFig = function( x , N ){\n"
- "if ( ! N ){\n"
- "N = 3;}\n"
- "var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );\n"
- "return Math.round(x*p)/p;}\n"
- "Random = function() {}\n"
- "Random.srand = function( s ) { _srand( s ); }\n"
- "Random.rand = function() { return _rand(); }\n"
- "Random.randInt = function( n ) { return Math.floor( Random.rand() * n ); }\n"
- "Random.setRandomSeed = function( s ) {\n"
- "s = s || new Date().getTime();\n"
- "print( \"setting random seed: \" + s );\n"
- "Random.srand( s );}\n"
- "Random.genExp = function( mean ) {\n"
- "return -Math.log( Random.rand() ) * mean;}\n"
- "killWithUris = function( uris ) {\n"
- "var inprog = db.currentOp().inprog;\n"
- "for( var u in uris ) {\n"
- "for ( var i in inprog ) {\n"
- "if ( uris[ u ] == inprog[ i ].client ) {\n"
- "db.killOp( inprog[ i ].opid );}}}}\n"
- "Geo = {};\n"
- "Geo.distance = function( a , b ){\n"
- "var ax = null;\n"
- "var ay = null;\n"
- "var bx = null;\n"
- "var by = null;\n"
- "for ( var key in a ){\n"
- "if ( ax == null )\n"
- "ax = a[key];\n"
- "else if ( ay == null )\n"
- "ay = a[key];}\n"
- "for ( var key in b ){\n"
- "if ( bx == null )\n"
- "bx = b[key];\n"
- "else if ( by == null )\n"
- "by = b[key];}\n"
- "return Math.sqrt( Math.pow( by - ay , 2 ) +\n"
- "Math.pow( bx - ax , 2 ) );}\n"
- "rs = function () { return \"try rs.help()\"; }\n"
- "rs.help = function () {\n"
- "print(\"\\trs.status() { replSetGetStatus : 1 } checks repl set status\");\n"
- "print(\"\\trs.initiate() { replSetInitiate : null } initiates set with default settings\");\n"
- "print(\"\\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg\");\n"
- "print(\"\\trs.add(hostportstr) add a new member to the set with default attributes\");\n"
- "print(\"\\trs.add(membercfgobj) add a new member to the set with extra attributes\");\n"
- "print(\"\\trs.addArb(hostportstr) add a new member which is arbiterOnly:true\");\n"
- "print(\"\\trs.stepDown() step down as primary (momentarily)\");\n"
- "print(\"\\trs.conf() return configuration from local.system.replset\");\n"
- "print(\"\\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()\");\n"
- "print();\n"
- "print(\"\\tdb.isMaster() check who is primary\");\n"
- "print();\n"
- "print(\"\\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info\");}\n"
- "rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }\n"
- "rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
- "rs.isMaster = function () { return db.isMaster(); }\n"
- "rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
- "rs.add = function (hostport, arb) {\n"
- "var cfg = hostport;\n"
- "var local = db.getSisterDB(\"local\");\n"
- "assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
- "var c = local.system.replset.findOne();\n"
- "assert(c, \"no config object retrievable from local.system.replset\");\n"
- "c.version++;\n"
- "var max = 0;\n"
- "for (var i in c.members)\n"
- "if (c.members[i]._id > max) max = c.members[i]._id;\n"
- "if (isString(hostport)) {\n"
- "cfg = { _id: max + 1, host: hostport };\n"
- "if (arb)\n"
- "cfg.arbiterOnly = true;}\n"
- "c.members.push(cfg);\n"
- "return db._adminCommand({ replSetReconfig: c });}\n"
- "rs.stepDown = function () { return db._adminCommand({ replSetStepDown:true}); }\n"
- "rs.addArb = function (hn) { return this.add(hn, true); }\n"
- "rs.conf = function () { return db.getSisterDB(\"local\").system.replset.findOne(); }\n"
- "help = shellHelper.help = function (x) {\n"
- "if (x == \"connect\") {\n"
- "print(\"\\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.\");\n"
- "print(\"Additional connections may be opened:\\n\");\n"
- "print(\" var x = new Mongo('host[:port]');\");\n"
- "print(\" var mydb = x.getDB('mydb');\");\n"
- "print(\" or\");\n"
- "print(\" var mydb = connect('host[:port]/mydb');\");\n"
- "print(\"\\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\\n\");\n"
- "return;}\n"
- "if (x == \"misc\") {\n"
- "print(\"\\tb = new BinData(subtype,base64str) create a BSON BinData value\");\n"
- "print(\"\\tb.subtype() the BinData subtype (0..255)\");\n"
- "print(\"\\tb.length() length of the BinData data in bytes\");\n"
- "print(\"\\tb.hex() the data as a hex encoded string\");\n"
- "print(\"\\tb.base64() the data as a base 64 encoded string\");\n"
- "print(\"\\tb.toString()\");\n"
- "return;}\n"
- "if (x == \"admin\") {\n"
- "print(\"\\tls([path]) list files\");\n"
- "print(\"\\tpwd() returns current directory\");\n"
- "print(\"\\tlistFiles([path]) returns file list\");\n"
- "print(\"\\thostname() returns name of this host\");\n"
- "print(\"\\tcat(fname) returns contents of text file as a string\");\n"
- "print(\"\\tremoveFile(f) delete a file\");\n"
- "print(\"\\tload(jsfilename) load and execute a .js file\");\n"
- "print(\"\\trun(program[, args...]) spawn a program and wait for its completion\");\n"
- "print(\"\\tsleep(m) sleep m milliseconds\");\n"
- "print(\"\\tgetMemInfo() diagnostic\");\n"
- "return;}\n"
- "if (x == \"test\") {\n"
- "print(\"\\tstartMongodEmpty(args) DELETES DATA DIR and then starts mongod\");\n"
- "print(\"\\t returns a connection to the new server\");\n"
- "print(\"\\tstartMongodTest() DELETES DATA DIR\");\n"
- "print(\"\\t automatically picks port #s starting at 27000 and increasing\");\n"
- "print(\"\\t or you can specify the port as the first arg\");\n"
- "print(\"\\t dir is /data/db/<port>/ if not specified as the 2nd arg\");\n"
- "print(\"\\t returns a connection to the new server\");\n"
- "return;}\n"
- "print(\"\\t\" + \"db.help() help on db methods\");\n"
- "print(\"\\t\" + \"db.mycoll.help() help on collection methods\");\n"
- "print(\"\\t\" + \"rs.help() help on replica set methods\");\n"
- "print(\"\\t\" + \"help connect connecting to a db help\");\n"
- "print(\"\\t\" + \"help admin administrative help\");\n"
- "print(\"\\t\" + \"help misc misc things to know\");\n"
- "print();\n"
- "print(\"\\t\" + \"show dbs show database names\");\n"
- "print(\"\\t\" + \"show collections show collections in current database\");\n"
- "print(\"\\t\" + \"show users show users in current database\");\n"
- "print(\"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
- "print(\"\\t\" + \"use <db_name> set current database\");\n"
- "print(\"\\t\" + \"db.foo.find() list objects in collection foo\");\n"
- "print(\"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\");\n"
- "print(\"\\t\" + \"it result of the last line evaluated; use to further iterate\");\n"
- "print(\"\\t\" + \"exit quit the mongo shell\");}\n"
- "if ( typeof DB == \"undefined\" ){\n"
- "DB = function( mongo , name ){\n"
- "this._mongo = mongo;\n"
- "this._name = name;}}\n"
- "DB.prototype.getMongo = function(){\n"
- "assert( this._mongo , \"why no mongo!\" );\n"
- "return this._mongo;}\n"
- "DB.prototype.getSisterDB = function( name ){\n"
- "return this.getMongo().getDB( name );}\n"
- "DB.prototype.getName = function(){\n"
- "return this._name;}\n"
- "DB.prototype.stats = function(){\n"
- "return this.runCommand( { dbstats : 1 } );}\n"
- "DB.prototype.getCollection = function( name ){\n"
- "return new DBCollection( this._mongo , this , name , this._name + \".\" + name );}\n"
- "DB.prototype.commandHelp = function( name ){\n"
- "var c = {};\n"
- "c[name] = 1;\n"
- "c.help = true;\n"
- "return this.runCommand( c ).help;}\n"
- "DB.prototype.runCommand = function( obj ){\n"
- "if ( typeof( obj ) == \"string\" ){\n"
- "var n = {};\n"
- "n[obj] = 1;\n"
- "obj = n;}\n"
- "return this.getCollection( \"$cmd\" ).findOne( obj );}\n"
- "DB.prototype._dbCommand = DB.prototype.runCommand;\n"
- "DB.prototype._adminCommand = function( obj ){\n"
- "if ( this._name == \"admin\" )\n"
- "return this.runCommand( obj );\n"
- "return this.getSisterDB( \"admin\" ).runCommand( obj );}\n"
- "DB.prototype.addUser = function( username , pass, readOnly ){\n"
- "readOnly = readOnly || false;\n"
- "var c = this.getCollection( \"system.users\" );\n"
- "var u = c.findOne( { user : username } ) || { user : username };\n"
- "u.readOnly = readOnly;\n"
- "u.pwd = hex_md5( username + \":mongo:\" + pass );\n"
- "print( tojson( u ) );\n"
- "c.save( u );}\n"
- "DB.prototype.removeUser = function( username ){\n"
- "this.getCollection( \"system.users\" ).remove( { user : username } );}\n"
- "DB.prototype.__pwHash = function( nonce, username, pass ) {\n"
- "return hex_md5( nonce + username + hex_md5( username + \":mongo:\" + pass ) );}\n"
- "DB.prototype.auth = function( username , pass ){\n"
- "var n = this.runCommand( { getnonce : 1 } );\n"
- "var a = this.runCommand(\n"
- "{\n"
- "authenticate : 1 ,\n"
- "user : username ,\n"
- "nonce : n.nonce ,\n"
- "key : this.__pwHash( n.nonce, username, pass )}\n"
- ");\n"
- "return a.ok;}\n"
- "\n"
- "DB.prototype.createCollection = function(name, opt) {\n"
- "var options = opt || {};\n"
- "var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };\n"
- "var res = this._dbCommand(cmd);\n"
- "return res;}\n"
- "\n"
- "DB.prototype.getProfilingLevel = function() {\n"
- "var res = this._dbCommand( { profile: -1 } );\n"
- "return res ? res.was : null;}\n"
- "\n"
- "DB.prototype.dropDatabase = function() {\n"
- "if ( arguments.length )\n"
- "throw \"dropDatabase doesn't take arguments\";\n"
- "return this._dbCommand( { dropDatabase: 1 } );}\n"
- "DB.prototype.shutdownServer = function() {\n"
- "if( \"admin\" != this._name ){\n"
- "return \"shutdown command only works with the admin database; try 'use admin'\";}\n"
- "try {\n"
- "var res = this._dbCommand(\"shutdown\");\n"
- "if( res )\n"
- "throw \"shutdownServer failed: \" + res.errmsg;\n"
- "throw \"shutdownServer failed\";}\n"
- "catch ( e ){\n"
- "assert( tojson( e ).indexOf( \"error doing query: failed\" ) >= 0 , \"unexpected error: \" + tojson( e ) );\n"
- "print( \"server should be down...\" );}}\n"
- "\n"
- "DB.prototype.cloneDatabase = function(from) {\n"
- "assert( isString(from) && from.length );\n"
- "return this._dbCommand( { clone: from } );}\n"
- "\n"
- "DB.prototype.cloneCollection = function(from, collection, query) {\n"
- "assert( isString(from) && from.length );\n"
- "assert( isString(collection) && collection.length );\n"
- "collection = this._name + \".\" + collection;\n"
- "query = query || {};\n"
- "return this._dbCommand( { cloneCollection:collection, from:from, query:query } );}\n"
- "\n"
- "DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {\n"
- "assert( isString(fromdb) && fromdb.length );\n"
- "assert( isString(todb) && todb.length );\n"
- "fromhost = fromhost || \"\";\n"
- "if ( username && password ) {\n"
- "var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );\n"
- "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );\n"
- "} else {\n"
- "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );}}\n"
- "\n"
- "DB.prototype.repairDatabase = function() {\n"
- "return this._dbCommand( { repairDatabase: 1 } );}\n"
- "DB.prototype.help = function() {\n"
- "print(\"DB methods:\");\n"
- "print(\"\\tdb.addUser(username, password[, readOnly=false])\");\n"
- "print(\"\\tdb.auth(username, password)\");\n"
- "print(\"\\tdb.cloneDatabase(fromhost)\");\n"
- "print(\"\\tdb.commandHelp(name) returns the help for the command\");\n"
- "print(\"\\tdb.copyDatabase(fromdb, todb, fromhost)\");\n"
- "print(\"\\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )\");\n"
- "print(\"\\tdb.currentOp() displays the current operation in the db\");\n"
- "print(\"\\tdb.dropDatabase()\");\n"
- "print(\"\\tdb.eval(func, args) run code server-side\");\n"
- "print(\"\\tdb.getCollection(cname) same as db['cname'] or db.cname\");\n"
- "print(\"\\tdb.getCollectionNames()\");\n"
- "print(\"\\tdb.getLastError() - just returns the err msg string\");\n"
- "print(\"\\tdb.getLastErrorObj() - return full status object\");\n"
- "print(\"\\tdb.getMongo() get the server connection object\");\n"
- "print(\"\\tdb.getMongo().setSlaveOk() allow this connection to read from the nonmaster member of a replica pair\");\n"
- "print(\"\\tdb.getName()\");\n"
- "print(\"\\tdb.getPrevError()\");\n"
- "print(\"\\tdb.getProfilingLevel()\");\n"
- "print(\"\\tdb.getReplicationInfo()\");\n"
- "print(\"\\tdb.getSisterDB(name) get the db at the same server as this one\");\n"
- "print(\"\\tdb.isMaster() check replica primary status\");\n"
- "print(\"\\tdb.killOp(opid) kills the current operation in the db\");\n"
- "print(\"\\tdb.listCommands() lists all the db commands\");\n"
- "print(\"\\tdb.printCollectionStats()\");\n"
- "print(\"\\tdb.printReplicationInfo()\");\n"
- "print(\"\\tdb.printSlaveReplicationInfo()\");\n"
- "print(\"\\tdb.printShardingStatus()\");\n"
- "print(\"\\tdb.removeUser(username)\");\n"
- "print(\"\\tdb.repairDatabase()\");\n"
- "print(\"\\tdb.resetError()\");\n"
- "print(\"\\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }\");\n"
- "print(\"\\tdb.serverStatus()\");\n"
- "print(\"\\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all\");\n"
- "print(\"\\tdb.shutdownServer()\");\n"
- "print(\"\\tdb.stats()\");\n"
- "print(\"\\tdb.version() current version of the server\");\n"
- "print(\"\\tdb.getMongo().setSlaveOk() allow queries on a replication slave server\");\n"
- "return __magicNoPrint;}\n"
- "DB.prototype.printCollectionStats = function(){\n"
- "var mydb = this;\n"
- "this.getCollectionNames().forEach(\n"
- "function(z){\n"
- "print( z );\n"
- "printjson( mydb.getCollection(z).stats() );\n"
- "print( \"---\" );}\n"
- ");}\n"
- "\n"
- "DB.prototype.setProfilingLevel = function(level,slowms) {\n"
- "if (level < 0 || level > 2) {\n"
- "throw { dbSetProfilingException : \"input level \" + level + \" is out of range [0..2]\" };}\n"
- "var cmd = { profile: level };\n"
- "if ( slowms )\n"
- "cmd[\"slowms\"] = slowms;\n"
- "return this._dbCommand( cmd );}\n"
- "\n"
- "DB.prototype.eval = function(jsfunction) {\n"
- "var cmd = { $eval : jsfunction };\n"
- "if ( arguments.length > 1 ) {\n"
- "cmd.args = argumentsToArray( arguments ).slice(1);}\n"
- "var res = this._dbCommand( cmd );\n"
- "if (!res.ok)\n"
- "throw tojson( res );\n"
- "return res.retval;}\n"
- "DB.prototype.dbEval = DB.prototype.eval;\n"
- "\n"
- "DB.prototype.groupeval = function(parmsObj) {\n"
- "var groupFunction = function() {\n"
- "var parms = args[0];\n"
- "var c = db[parms.ns].find(parms.cond||{});\n"
- "var map = new Map();\n"
- "var pks = parms.key ? Object.keySet( parms.key ) : null;\n"
- "var pkl = pks ? pks.length : 0;\n"
- "var key = {};\n"
- "while( c.hasNext() ) {\n"
- "var obj = c.next();\n"
- "if ( pks ) {\n"
- "for( var i=0; i<pkl; i++ ){\n"
- "var k = pks[i];\n"
- "key[k] = obj[k];}}\n"
- "else {\n"
- "key = parms.$keyf(obj);}\n"
- "var aggObj = map.get(key);\n"
- "if( aggObj == null ) {\n"
- "var newObj = Object.extend({}, key);\n"
- "aggObj = Object.extend(newObj, parms.initial)\n"
- "map.put( key , aggObj );}\n"
- "parms.$reduce(obj, aggObj);}\n"
- "return map.values();}\n"
- "return this.eval(groupFunction, this._groupFixParms( parmsObj ));}\n"
- "DB.prototype.groupcmd = function( parmsObj ){\n"
- "var ret = this.runCommand( { \"group\" : this._groupFixParms( parmsObj ) } );\n"
- "if ( ! ret.ok ){\n"
- "throw \"group command failed: \" + tojson( ret );}\n"
- "return ret.retval;}\n"
- "DB.prototype.group = DB.prototype.groupcmd;\n"
- "DB.prototype._groupFixParms = function( parmsObj ){\n"
- "var parms = Object.extend({}, parmsObj);\n"
- "if( parms.reduce ) {\n"
- "parms.$reduce = parms.reduce;\n"
- "delete parms.reduce;}\n"
- "if( parms.keyf ) {\n"
- "parms.$keyf = parms.keyf;\n"
- "delete parms.keyf;}\n"
- "return parms;}\n"
- "DB.prototype.resetError = function(){\n"
- "return this.runCommand( { reseterror : 1 } );}\n"
- "DB.prototype.forceError = function(){\n"
- "return this.runCommand( { forceerror : 1 } );}\n"
- "DB.prototype.getLastError = function( w , wtimeout ){\n"
- "var res = this.getLastErrorObj( w , wtimeout );\n"
- "if ( ! res.ok )\n"
- "throw \"getlasterror failed: \" + tojson( res );\n"
- "return res.err;}\n"
- "DB.prototype.getLastErrorObj = function( w , wtimeout ){\n"
- "var cmd = { getlasterror : 1 };\n"
- "if ( w ){\n"
- "cmd.w = w;\n"
- "if ( wtimeout )\n"
- "cmd.wtimeout = wtimeout;}\n"
- "var res = this.runCommand( cmd );\n"
- "if ( ! res.ok )\n"
- "throw \"getlasterror failed: \" + tojson( res );\n"
- "return res;}\n"
- "DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;\n"
- "/* Return the last error which has occurred, even if not the very last error.\n"
- "Returns:\n"
- "{ err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }\n"
- "result.err will be null if no error has occurred.\n"
- "*/\n"
- "DB.prototype.getPrevError = function(){\n"
- "return this.runCommand( { getpreverror : 1 } );}\n"
- "DB.prototype.getCollectionNames = function(){\n"
- "var all = [];\n"
- "var nsLength = this._name.length + 1;\n"
- "var c = this.getCollection( \"system.namespaces\" ).find();\n"
- "while ( c.hasNext() ){\n"
- "var name = c.next().name;\n"
- "if ( name.indexOf( \"$\" ) >= 0 && name.indexOf( \".oplog.$\" ) < 0 )\n"
- "continue;\n"
- "all.push( name.substring( nsLength ) );}\n"
- "return all.sort();}\n"
- "DB.prototype.tojson = function(){\n"
- "return this._name;}\n"
- "DB.prototype.toString = function(){\n"
- "return this._name;}\n"
- "DB.prototype.isMaster = function () { return this.runCommand(\"isMaster\"); }\n"
- "DB.prototype.currentOp = function(){\n"
- "return db.$cmd.sys.inprog.findOne();}\n"
- "DB.prototype.currentOP = DB.prototype.currentOp;\n"
- "DB.prototype.killOp = function(op) {\n"
- "if( !op )\n"
- "throw \"no opNum to kill specified\";\n"
- "return db.$cmd.sys.killop.findOne({'op':op});}\n"
- "DB.prototype.killOP = DB.prototype.killOp;\n"
- "DB.tsToSeconds = function(x){\n"
- "if ( x.t && x.i )\n"
- "return x.t / 1000;\n"
- "return x / 4294967296;}\n"
- "\n"
- "DB.prototype.getReplicationInfo = function() {\n"
- "var db = this.getSisterDB(\"local\");\n"
- "var result = { };\n"
- "var ol = db.system.namespaces.findOne({name:\"local.oplog.$main\"});\n"
- "if( ol && ol.options ) {\n"
- "result.logSizeMB = ol.options.size / 1000 / 1000;\n"
- "} else {\n"
- "result.errmsg = \"local.oplog.$main, or its options, not found in system.namespaces collection (not --master?)\";\n"
- "return result;}\n"
- "var firstc = db.oplog.$main.find().sort({$natural:1}).limit(1);\n"
- "var lastc = db.oplog.$main.find().sort({$natural:-1}).limit(1);\n"
- "if( !firstc.hasNext() || !lastc.hasNext() ) {\n"
- "result.errmsg = \"objects not found in local.oplog.$main -- is this a new and empty db instance?\";\n"
- "result.oplogMainRowCount = db.oplog.$main.count();\n"
- "return result;}\n"
- "var first = firstc.next();\n"
- "var last = lastc.next();\n"
- "{\n"
- "var tfirst = first.ts;\n"
- "var tlast = last.ts;\n"
- "if( tfirst && tlast ) {\n"
- "tfirst = DB.tsToSeconds( tfirst );\n"
- "tlast = DB.tsToSeconds( tlast );\n"
- "result.timeDiff = tlast - tfirst;\n"
- "result.timeDiffHours = Math.round(result.timeDiff / 36)/100;\n"
- "result.tFirst = (new Date(tfirst*1000)).toString();\n"
- "result.tLast = (new Date(tlast*1000)).toString();\n"
- "result.now = Date();}\n"
- "else {\n"
- "result.errmsg = \"ts element not found in oplog objects\";}}\n"
- "return result;}\n"
- "DB.prototype.printReplicationInfo = function() {\n"
- "var result = this.getReplicationInfo();\n"
- "if( result.errmsg ) {\n"
- "print(tojson(result));\n"
- "return;}\n"
- "print(\"configured oplog size: \" + result.logSizeMB + \"MB\");\n"
- "print(\"log length start to end: \" + result.timeDiff + \"secs (\" + result.timeDiffHours + \"hrs)\");\n"
- "print(\"oplog first event time: \" + result.tFirst);\n"
- "print(\"oplog last event time: \" + result.tLast);\n"
- "print(\"now: \" + result.now);}\n"
- "DB.prototype.printSlaveReplicationInfo = function() {\n"
- "function g(x) {\n"
- "assert( x , \"how could this be null (printSlaveReplicationInfo gx)\" )\n"
- "print(\"source: \" + x.host);\n"
- "if ( x.syncedTo ){\n"
- "var st = new Date( DB.tsToSeconds( x.syncedTo ) * 1000 );\n"
- "var now = new Date();\n"
- "print(\"\\t syncedTo: \" + st.toString() );\n"
- "var ago = (now-st)/1000;\n"
- "var hrs = Math.round(ago/36)/100;\n"
- "print(\"\\t\\t = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");}\n"
- "else {\n"
- "print( \"\\t doing initial sync\" );}}\n"
- "var L = this.getSisterDB(\"local\");\n"
- "if( L.sources.count() == 0 ) {\n"
- "print(\"local.sources is empty; is this db a --slave?\");\n"
- "return;}\n"
- "L.sources.find().forEach(g);}\n"
- "DB.prototype.serverBuildInfo = function(){\n"
- "return this._adminCommand( \"buildinfo\" );}\n"
- "DB.prototype.serverStatus = function(){\n"
- "return this._adminCommand( \"serverStatus\" );}\n"
- "DB.prototype.serverCmdLineOpts = function(){\n"
- "return this._adminCommand( \"getCmdLineOpts\" );}\n"
- "DB.prototype.version = function(){\n"
- "return this.serverBuildInfo().version;}\n"
- "DB.prototype.listCommands = function(){\n"
- "var x = this.runCommand( \"listCommands\" );\n"
- "for ( var name in x.commands ){\n"
- "var c = x.commands[name];\n"
- "var s = name + \": \";\n"
- "switch ( c.lockType ){\n"
- "case -1: s += \"read-lock\"; break;\n"
- "case 0: s += \"no-lock\"; break;\n"
- "case 1: s += \"write-lock\"; break;\n"
- "default: s += c.lockType;}\n"
- "if (c.adminOnly) s += \" adminOnly \";\n"
- "if (c.adminOnly) s += \" slaveOk \";\n"
- "s += \"\\n \";\n"
- "s += c.help.replace(/\\n/g, '\\n ');\n"
- "s += \"\\n\";\n"
- "print( s );}}\n"
- "DB.prototype.printShardingStatus = function(){\n"
- "printShardingStatus( this.getSisterDB( \"config\" ) );}\n"
- "if ( typeof Mongo == \"undefined\" ){\n"
- "Mongo = function( host ){\n"
- "this.init( host );}}\n"
- "if ( ! Mongo.prototype ){\n"
- "throw \"Mongo.prototype not defined\";}\n"
- "if ( ! Mongo.prototype.find )\n"
- "Mongo.prototype.find = function( ns , query , fields , limit , skip ){ throw \"find not implemented\"; }\n"
- "if ( ! Mongo.prototype.insert )\n"
- "Mongo.prototype.insert = function( ns , obj ){ throw \"insert not implemented\"; }\n"
- "if ( ! Mongo.prototype.remove )\n"
- "Mongo.prototype.remove = function( ns , pattern ){ throw \"remove not implemented;\" }\n"
- "if ( ! Mongo.prototype.update )\n"
- "Mongo.prototype.update = function( ns , query , obj , upsert ){ throw \"update not implemented;\" }\n"
- "if ( typeof mongoInject == \"function\" ){\n"
- "mongoInject( Mongo.prototype );}\n"
- "Mongo.prototype.setSlaveOk = function() {\n"
- "this.slaveOk = true;}\n"
- "Mongo.prototype.getDB = function( name ){\n"
- "return new DB( this , name );}\n"
- "Mongo.prototype.getDBs = function(){\n"
- "var res = this.getDB( \"admin\" ).runCommand( { \"listDatabases\" : 1 } );\n"
- "assert( res.ok == 1 , \"listDatabases failed:\" + tojson( res ) );\n"
- "return res;}\n"
- "Mongo.prototype.getDBNames = function(){\n"
- "return this.getDBs().databases.map(\n"
- "function(z){\n"
- "return z.name;}\n"
- ");}\n"
- "Mongo.prototype.getCollection = function(ns){\n"
- "var idx = ns.indexOf( \".\" );\n"
- "if ( idx < 0 )\n"
- "throw \"need . in ns\";\n"
- "var db = ns.substring( 0 , idx );\n"
- "var c = ns.substring( idx + 1 );\n"
- "return this.getDB( db ).getCollection( c );}\n"
- "Mongo.prototype.toString = function(){\n"
- "return \"connection to \" + this.host;}\n"
- "Mongo.prototype.tojson = Mongo.prototype.toString;\n"
- "connect = function( url , user , pass ){\n"
- "chatty( \"connecting to: \" + url )\n"
- "if ( user && ! pass )\n"
- "throw \"you specified a user and not a password. either you need a password, or you're using the old connect api\";\n"
- "var idx = url.lastIndexOf( \"/\" );\n"
- "var db;\n"
- "if ( idx < 0 )\n"
- "db = new Mongo().getDB( url );\n"
- "else\n"
- "db = new Mongo( url.substring( 0 , idx ) ).getDB( url.substring( idx + 1 ) );\n"
- "if ( user && pass ){\n"
- "if ( ! db.auth( user , pass ) ){\n"
- "throw \"couldn't login\";}}\n"
- "return db;}\n"
- "MR = {};\n"
- "MR.init = function(){\n"
- "$max = 0;\n"
- "$arr = [];\n"
- "emit = MR.emit;\n"
- "$numEmits = 0;\n"
- "$numReduces = 0;\n"
- "$numReducesToDB = 0;\n"
- "gc();}\n"
- "MR.cleanup = function(){\n"
- "MR.init();\n"
- "gc();}\n"
- "MR.emit = function(k,v){\n"
- "$numEmits++;\n"
- "var num = nativeHelper.apply( get_num_ , [ k ] );\n"
- "var data = $arr[num];\n"
- "if ( ! data ){\n"
- "data = { key : k , values : new Array(1000) , count : 0 };\n"
- "$arr[num] = data;}\n"
- "data.values[data.count++] = v;\n"
- "$max = Math.max( $max , data.count );}\n"
- "MR.doReduce = function( useDB ){\n"
- "$numReduces++;\n"
- "if ( useDB )\n"
- "$numReducesToDB++;\n"
- "$max = 0;\n"
- "for ( var i=0; i<$arr.length; i++){\n"
- "var data = $arr[i];\n"
- "if ( ! data )\n"
- "continue;\n"
- "if ( useDB ){\n"
- "var x = tempcoll.findOne( { _id : data.key } );\n"
- "if ( x ){\n"
- "data.values[data.count++] = x.value;}}\n"
- "var r = $reduce( data.key , data.values.slice( 0 , data.count ) );\n"
- "if ( r && r.length && r[0] ){\n"
- "data.values = r;\n"
- "data.count = r.length;}\n"
- "else{\n"
- "data.values[0] = r;\n"
- "data.count = 1;}\n"
- "$max = Math.max( $max , data.count );\n"
- "if ( useDB ){\n"
- "if ( data.count == 1 ){\n"
- "tempcoll.save( { _id : data.key , value : data.values[0] } );}\n"
- "else {\n"
- "tempcoll.save( { _id : data.key , value : data.values.slice( 0 , data.count ) } );}}}}\n"
- "MR.check = function(){\n"
- "if ( $max < 2000 && $arr.length < 1000 ){\n"
- "return 0;}\n"
- "MR.doReduce();\n"
- "if ( $max < 2000 && $arr.length < 1000 ){\n"
- "return 1;}\n"
- "MR.doReduce( true );\n"
- "$arr = [];\n"
- "$max = 0;\n"
- "reset_num();\n"
- "gc();\n"
- "return 2;}\n"
- "MR.finalize = function(){\n"
- "tempcoll.find().forEach(\n"
- "function(z){\n"
- "z.value = $finalize( z._id , z.value );\n"
- "tempcoll.save( z );}\n"
- ");}\n"
- "if ( typeof DBQuery == \"undefined\" ){\n"
- "DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize ){\n"
- "this._mongo = mongo;\n"
- "this._db = db;\n"
- "this._collection = collection;\n"
- "this._ns = ns;\n"
- "this._query = query || {};\n"
- "this._fields = fields;\n"
- "this._limit = limit || 0;\n"
- "this._skip = skip || 0;\n"
- "this._batchSize = batchSize || 0;\n"
- "this._cursor = null;\n"
- "this._numReturned = 0;\n"
- "this._special = false;\n"
- "this._prettyShell = false;}\n"
- "print( \"DBQuery probably won't have array access \" );}\n"
- "DBQuery.prototype.help = function () {\n"
- "print(\"find() modifiers\")\n"
- "print(\"\\t.sort( {...} )\")\n"
- "print(\"\\t.limit( n )\")\n"
- "print(\"\\t.skip( n )\")\n"
- "print(\"\\t.count() - total # of objects matching query, ignores skip,limit\")\n"
- "print(\"\\t.size() - total # of objects cursor would return, honors skip,limit\")\n"
- "print(\"\\t.explain([verbose])\")\n"
- "print(\"\\t.hint(...)\")\n"
- "print(\"\\t.showDiskLoc() - adds a $diskLoc field to each returned object\")\n"
- "print(\"\\nCursor methods\");\n"
- "print(\"\\t.forEach( func )\")\n"
- "print(\"\\t.print() - output to console in full pretty format\")\n"
- "print(\"\\t.map( func )\")\n"
- "print(\"\\t.hasNext()\")\n"
- "print(\"\\t.next()\")}\n"
- "DBQuery.prototype.clone = function(){\n"
- "var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,\n"
- "this._query , this._fields ,\n"
- "this._limit , this._skip , this._batchSize );\n"
- "q._special = this._special;\n"
- "return q;}\n"
- "DBQuery.prototype._ensureSpecial = function(){\n"
- "if ( this._special )\n"
- "return;\n"
- "var n = { query : this._query };\n"
- "this._query = n;\n"
- "this._special = true;}\n"
- "DBQuery.prototype._checkModify = function(){\n"
- "if ( this._cursor )\n"
- "throw \"query already executed\";}\n"
- "DBQuery.prototype._exec = function(){\n"
- "if ( ! this._cursor ){\n"
- "assert.eq( 0 , this._numReturned );\n"
- "this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize );\n"
- "this._cursorSeen = 0;}\n"
- "return this._cursor;}\n"
- "DBQuery.prototype.limit = function( limit ){\n"
- "this._checkModify();\n"
- "this._limit = limit;\n"
- "return this;}\n"
- "DBQuery.prototype.batchSize = function( batchSize ){\n"
- "this._checkModify();\n"
- "this._batchSize = batchSize;\n"
- "return this;}\n"
- "DBQuery.prototype.skip = function( skip ){\n"
- "this._checkModify();\n"
- "this._skip = skip;\n"
- "return this;}\n"
- "DBQuery.prototype.hasNext = function(){\n"
- "this._exec();\n"
- "if ( this._limit > 0 && this._cursorSeen >= this._limit )\n"
- "return false;\n"
- "var o = this._cursor.hasNext();\n"
- "return o;}\n"
- "DBQuery.prototype.next = function(){\n"
- "this._exec();\n"
- "var o = this._cursor.hasNext();\n"
- "if ( o )\n"
- "this._cursorSeen++;\n"
- "else\n"
- "throw \"error hasNext: \" + o;\n"
- "var ret = this._cursor.next();\n"
- "if ( ret.$err && this._numReturned == 0 && ! this.hasNext() )\n"
- "throw \"error: \" + tojson( ret );\n"
- "this._numReturned++;\n"
- "return ret;}\n"
- "DBQuery.prototype.objsLeftInBatch = function(){\n"
- "this._exec();\n"
- "var ret = this._cursor.objsLeftInBatch();\n"
- "if ( ret.$err )\n"
- "throw \"error: \" + tojson( ret );\n"
- "return ret;}\n"
- "DBQuery.prototype.toArray = function(){\n"
- "if ( this._arr )\n"
- "return this._arr;\n"
- "var a = [];\n"
- "while ( this.hasNext() )\n"
- "a.push( this.next() );\n"
- "this._arr = a;\n"
- "return a;}\n"
- "DBQuery.prototype.count = function( applySkipLimit ){\n"
- "var cmd = { count: this._collection.getName() };\n"
- "if ( this._query ){\n"
- "if ( this._special )\n"
- "cmd.query = this._query.query;\n"
- "else\n"
- "cmd.query = this._query;}\n"
- "cmd.fields = this._fields || {};\n"
- "if ( applySkipLimit ){\n"
- "if ( this._limit )\n"
- "cmd.limit = this._limit;\n"
- "if ( this._skip )\n"
- "cmd.skip = this._skip;}\n"
- "var res = this._db.runCommand( cmd );\n"
- "if( res && res.n != null ) return res.n;\n"
- "throw \"count failed: \" + tojson( res );}\n"
- "DBQuery.prototype.size = function(){\n"
- "return this.count( true );}\n"
- "DBQuery.prototype.countReturn = function(){\n"
- "var c = this.count();\n"
- "if ( this._skip )\n"
- "c = c - this._skip;\n"
- "if ( this._limit > 0 && this._limit < c )\n"
- "return this._limit;\n"
- "return c;}\n"
- "\n"
- "DBQuery.prototype.itcount = function(){\n"
- "var num = 0;\n"
- "while ( this.hasNext() ){\n"
- "num++;\n"
- "this.next();}\n"
- "return num;}\n"
- "DBQuery.prototype.length = function(){\n"
- "return this.toArray().length;}\n"
- "DBQuery.prototype._addSpecial = function( name , value ){\n"
- "this._ensureSpecial();\n"
- "this._query[name] = value;\n"
- "return this;}\n"
- "DBQuery.prototype.sort = function( sortBy ){\n"
- "return this._addSpecial( \"orderby\" , sortBy );}\n"
- "DBQuery.prototype.hint = function( hint ){\n"
- "return this._addSpecial( \"$hint\" , hint );}\n"
- "DBQuery.prototype.min = function( min ) {\n"
- "return this._addSpecial( \"$min\" , min );}\n"
- "DBQuery.prototype.max = function( max ) {\n"
- "return this._addSpecial( \"$max\" , max );}\n"
- "DBQuery.prototype.showDiskLoc = function() {\n"
- "return this._addSpecial( \"$showDiskLoc\" , true);}\n"
- "DBQuery.prototype.forEach = function( func ){\n"
- "while ( this.hasNext() )\n"
- "func( this.next() );}\n"
- "DBQuery.prototype.map = function( func ){\n"
- "var a = [];\n"
- "while ( this.hasNext() )\n"
- "a.push( func( this.next() ) );\n"
- "return a;}\n"
- "DBQuery.prototype.arrayAccess = function( idx ){\n"
- "return this.toArray()[idx];}\n"
- "DBQuery.prototype.explain = function (verbose) {\n"
- "/* verbose=true --> include allPlans, oldPlan fields */\n"
- "var n = this.clone();\n"
- "n._ensureSpecial();\n"
- "n._query.$explain = true;\n"
- "n._limit = n._limit * -1;\n"
- "var e = n.next();\n"
- "if (!verbose) {\n"
- "delete e.allPlans;\n"
- "delete e.oldPlan;}\n"
- "return e;}\n"
- "DBQuery.prototype.snapshot = function(){\n"
- "this._ensureSpecial();\n"
- "this._query.$snapshot = true;\n"
- "return this;}\n"
- "DBQuery.prototype.pretty = function(){\n"
- "this._prettyShell = true;\n"
- "return this;}\n"
- "DBQuery.prototype.shellPrint = function(){\n"
- "try {\n"
- "var n = 0;\n"
- "while ( this.hasNext() && n < DBQuery.shellBatchSize ){\n"
- "var s = this._prettyShell ? tojson( this.next() ) : tojson( this.next() , \"\" , true );\n"
- "print( s );\n"
- "n++;}\n"
- "if ( this.hasNext() ){\n"
- "print( \"has more\" );\n"
- "___it___ = this;}\n"
- "else {\n"
- "___it___ = null;}}\n"
- "catch ( e ){\n"
- "print( e );}}\n"
- "DBQuery.prototype.toString = function(){\n"
- "return \"DBQuery: \" + this._ns + \" -> \" + tojson( this.query );}\n"
- "DBQuery.shellBatchSize = 20;\n"
- "// or db[\"colName\"]\n"
- "if ( ( typeof DBCollection ) == \"undefined\" ){\n"
- "DBCollection = function( mongo , db , shortName , fullName ){\n"
- "this._mongo = mongo;\n"
- "this._db = db;\n"
- "this._shortName = shortName;\n"
- "this._fullName = fullName;\n"
- "this.verify();}}\n"
- "DBCollection.prototype.verify = function(){\n"
- "assert( this._fullName , \"no fullName\" );\n"
- "assert( this._shortName , \"no shortName\" );\n"
- "assert( this._db , \"no db\" );\n"
- "assert.eq( this._fullName , this._db._name + \".\" + this._shortName , \"name mismatch\" );\n"
- "assert( this._mongo , \"no mongo in DBCollection\" );}\n"
- "DBCollection.prototype.getName = function(){\n"
- "return this._shortName;}\n"
- "DBCollection.prototype.help = function () {\n"
- "var shortName = this.getName();\n"
- "print(\"DBCollection help\");\n"
- "print(\"\\tdb.\" + shortName + \".find().help() - show DBCursor help\");\n"
- "print(\"\\tdb.\" + shortName + \".count()\");\n"
- "print(\"\\tdb.\" + shortName + \".dataSize()\");\n"
- "print(\"\\tdb.\" + shortName + \".distinct( key ) - eg. db.\" + shortName + \".distinct( 'x' )\");\n"
- "print(\"\\tdb.\" + shortName + \".drop() drop the collection\");\n"
- "print(\"\\tdb.\" + shortName + \".dropIndex(name)\");\n"
- "print(\"\\tdb.\" + shortName + \".dropIndexes()\");\n"
- "print(\"\\tdb.\" + shortName + \".ensureIndex(keypattern,options) - options should be an object with these possible fields: name, unique, dropDups\");\n"
- "print(\"\\tdb.\" + shortName + \".reIndex()\");\n"
- "print(\"\\tdb.\" + shortName + \".find( [query] , [fields]) - first parameter is an optional query filter. second parameter is optional set of fields to return.\");\n"
- "print(\"\\t e.g. db.\" + shortName + \".find( { x : 77 } , { name : 1 , x : 1 } )\");\n"
- "print(\"\\tdb.\" + shortName + \".find(...).count()\");\n"
- "print(\"\\tdb.\" + shortName + \".find(...).limit(n)\");\n"
- "print(\"\\tdb.\" + shortName + \".find(...).skip(n)\");\n"
- "print(\"\\tdb.\" + shortName + \".find(...).sort(...)\");\n"
- "print(\"\\tdb.\" + shortName + \".findOne([query])\");\n"
- "print(\"\\tdb.\" + shortName + \".findAndModify( { update : ... , remove : bool [, query: {}, sort: {}, 'new': false] } )\");\n"
- "print(\"\\tdb.\" + shortName + \".getDB() get DB object associated with collection\");\n"
- "print(\"\\tdb.\" + shortName + \".getIndexes()\");\n"
- "print(\"\\tdb.\" + shortName + \".group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )\");\n"
- "print(\"\\tdb.\" + shortName + \".mapReduce( mapFunction , reduceFunction , <optional params> )\");\n"
- "print(\"\\tdb.\" + shortName + \".remove(query)\");\n"
- "print(\"\\tdb.\" + shortName + \".renameCollection( newName , <dropTarget> ) renames the collection.\");\n"
- "print(\"\\tdb.\" + shortName + \".runCommand( name , <options> ) runs a db command with the given name where the first param is the collection name\");\n"
- "print(\"\\tdb.\" + shortName + \".save(obj)\");\n"
- "print(\"\\tdb.\" + shortName + \".stats()\");\n"
- "print(\"\\tdb.\" + shortName + \".storageSize() - includes free space allocated to this collection\");\n"
- "print(\"\\tdb.\" + shortName + \".totalIndexSize() - size in bytes of all the indexes\");\n"
- "print(\"\\tdb.\" + shortName + \".totalSize() - storage allocated for all data and indexes\");\n"
- "print(\"\\tdb.\" + shortName + \".update(query, object[, upsert_bool, multi_bool])\");\n"
- "print(\"\\tdb.\" + shortName + \".validate() - SLOW\");\n"
- "print(\"\\tdb.\" + shortName + \".getShardVersion() - only for use with sharding\");\n"
- "return __magicNoPrint;}\n"
- "DBCollection.prototype.getFullName = function(){\n"
- "return this._fullName;}\n"
- "DBCollection.prototype.getDB = function(){\n"
- "return this._db;}\n"
- "DBCollection.prototype._dbCommand = function( cmd , params ){\n"
- "if ( typeof( cmd ) == \"object\" )\n"
- "return this._db._dbCommand( cmd );\n"
- "var c = {};\n"
- "c[cmd] = this.getName();\n"
- "if ( params )\n"
- "Object.extend( c , params );\n"
- "return this._db._dbCommand( c );}\n"
- "DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;\n"
- "DBCollection.prototype._massageObject = function( q ){\n"
- "if ( ! q )\n"
- "return {};\n"
- "var type = typeof q;\n"
- "if ( type == \"function\" )\n"
- "return { $where : q };\n"
- "if ( q.isObjectId )\n"
- "return { _id : q };\n"
- "if ( type == \"object\" )\n"
- "return q;\n"
- "if ( type == \"string\" ){\n"
- "if ( q.length == 24 )\n"
- "return { _id : q };\n"
- "return { $where : q };}\n"
- "throw \"don't know how to massage : \" + type;}\n"
- "DBCollection.prototype._validateObject = function( o ){\n"
- "if ( o._ensureSpecial && o._checkModify )\n"
- "throw \"can't save a DBQuery object\";}\n"
- "DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
- "DBCollection.prototype._validateForStorage = function( o ){\n"
- "this._validateObject( o );\n"
- "for ( var k in o ){\n"
- "if ( k.indexOf( \".\" ) >= 0 ) {\n"
- "throw \"can't have . in field names [\" + k + \"]\" ;}\n"
- "if ( k.indexOf( \"$\" ) == 0 && ! DBCollection._allowedFields[k] ) {\n"
- "throw \"field names cannot start with $ [\" + k + \"]\";}\n"
- "if ( o[k] !== null && typeof( o[k] ) === \"object\" ) {\n"
- "this._validateForStorage( o[k] );}}\n"
- "};\n"
- "DBCollection.prototype.find = function( query , fields , limit , skip ){\n"
- "return new DBQuery( this._mongo , this._db , this ,\n"
- "this._fullName , this._massageObject( query ) , fields , limit , skip );}\n"
- "DBCollection.prototype.findOne = function( query , fields ){\n"
- "var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 , 0 );\n"
- "if ( ! cursor.hasNext() )\n"
- "return null;\n"
- "var ret = cursor.next();\n"
- "if ( cursor.hasNext() ) throw \"findOne has more than 1 result!\";\n"
- "if ( ret.$err )\n"
- "throw \"error \" + tojson( ret );\n"
- "return ret;}\n"
- "DBCollection.prototype.insert = function( obj , _allow_dot ){\n"
- "if ( ! obj )\n"
- "throw \"no object passed to insert!\";\n"
- "if ( ! _allow_dot ) {\n"
- "this._validateForStorage( obj );}\n"
- "if ( typeof( obj._id ) == \"undefined\" ){\n"
- "var tmp = obj;\n"
- "obj = {_id: new ObjectId()};\n"
- "for (var key in tmp){\n"
- "obj[key] = tmp[key];}}\n"
- "this._mongo.insert( this._fullName , obj );\n"
- "this._lastID = obj._id;}\n"
- "DBCollection.prototype.remove = function( t ){\n"
- "this._mongo.remove( this._fullName , this._massageObject( t ) );}\n"
- "DBCollection.prototype.update = function( query , obj , upsert , multi ){\n"
- "assert( query , \"need a query\" );\n"
- "assert( obj , \"need an object\" );\n"
- "this._validateObject( obj );\n"
- "this._mongo.update( this._fullName , query , obj , upsert ? true : false , multi ? true : false );}\n"
- "DBCollection.prototype.save = function( obj ){\n"
- "if ( obj == null || typeof( obj ) == \"undefined\" )\n"
- "throw \"can't save a null\";\n"
- "if ( typeof( obj._id ) == \"undefined\" ){\n"
- "obj._id = new ObjectId();\n"
- "return this.insert( obj );}\n"
- "else {\n"
- "return this.update( { _id : obj._id } , obj , true );}}\n"
- "DBCollection.prototype._genIndexName = function( keys ){\n"
- "var name = \"\";\n"
- "for ( var k in keys ){\n"
- "var v = keys[k];\n"
- "if ( typeof v == \"function\" )\n"
- "continue;\n"
- "if ( name.length > 0 )\n"
- "name += \"_\";\n"
- "name += k + \"_\";\n"
- "if ( typeof v == \"number\" )\n"
- "name += v;}\n"
- "return name;}\n"
- "DBCollection.prototype._indexSpec = function( keys, options ) {\n"
- "var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };\n"
- "if ( ! options ){}\n"
- "else if ( typeof ( options ) == \"string\" )\n"
- "ret.name = options;\n"
- "else if ( typeof ( options ) == \"boolean\" )\n"
- "ret.unique = true;\n"
- "else if ( typeof ( options ) == \"object\" ){\n"
- "if ( options.length ){\n"
- "var nb = 0;\n"
- "for ( var i=0; i<options.length; i++ ){\n"
- "if ( typeof ( options[i] ) == \"string\" )\n"
- "ret.name = options[i];\n"
- "else if ( typeof( options[i] ) == \"boolean\" ){\n"
- "if ( options[i] ){\n"
- "if ( nb == 0 )\n"
- "ret.unique = true;\n"
- "if ( nb == 1 )\n"
- "ret.dropDups = true;}\n"
- "nb++;}}}\n"
- "else {\n"
- "Object.extend( ret , options );}}\n"
- "else {\n"
- "throw \"can't handle: \" + typeof( options );}\n"
- "/*\n"
- "return ret;\n"
- "var name;\n"
- "var nTrue = 0;\n"
- "if ( ! isObject( options ) ) {\n"
- "options = [ options ];}\n"
- "if ( options.length ){\n"
- "for( var i = 0; i < options.length; ++i ) {\n"
- "var o = options[ i ];\n"
- "if ( isString( o ) ) {\n"
- "ret.name = o;\n"
- "} else if ( typeof( o ) == \"boolean\" ) {\n"
- "if ( o ) {\n"
- "++nTrue;}}}\n"
- "if ( nTrue > 0 ) {\n"
- "ret.unique = true;}\n"
- "if ( nTrue > 1 ) {\n"
- "ret.dropDups = true;}}\n"
- "*/\n"
- "return ret;}\n"
- "DBCollection.prototype.createIndex = function( keys , options ){\n"
- "var o = this._indexSpec( keys, options );\n"
- "this._db.getCollection( \"system.indexes\" ).insert( o , true );}\n"
- "DBCollection.prototype.ensureIndex = function( keys , options ){\n"
- "var name = this._indexSpec( keys, options ).name;\n"
- "this._indexCache = this._indexCache || {};\n"
- "if ( this._indexCache[ name ] ){\n"
- "return;}\n"
- "this.createIndex( keys , options );\n"
- "if ( this.getDB().getLastError() == \"\" ) {\n"
- "this._indexCache[name] = true;}}\n"
- "DBCollection.prototype.resetIndexCache = function(){\n"
- "this._indexCache = {};}\n"
- "DBCollection.prototype.reIndex = function() {\n"
- "return this._db.runCommand({ reIndex: this.getName() });}\n"
- "DBCollection.prototype.dropIndexes = function(){\n"
- "this.resetIndexCache();\n"
- "var res = this._db.runCommand( { deleteIndexes: this.getName(), index: \"*\" } );\n"
- "assert( res , \"no result from dropIndex result\" );\n"
- "if ( res.ok )\n"
- "return res;\n"
- "if ( res.errmsg.match( /not found/ ) )\n"
- "return res;\n"
- "throw \"error dropping indexes : \" + tojson( res );}\n"
- "DBCollection.prototype.drop = function(){\n"
- "this.resetIndexCache();\n"
- "var ret = this._db.runCommand( { drop: this.getName() } );\n"
- "if ( ! ret.ok ){\n"
- "if ( ret.errmsg == \"ns not found\" )\n"
- "return false;\n"
- "throw \"drop failed: \" + tojson( ret );}\n"
- "return true;}\n"
- "DBCollection.prototype.findAndModify = function(args){\n"
- "var cmd = { findandmodify: this.getName() };\n"
- "for (var key in args){\n"
- "cmd[key] = args[key];}\n"
- "var ret = this._db.runCommand( cmd );\n"
- "if ( ! ret.ok ){\n"
- "if (ret.errmsg == \"No matching object found\"){\n"
- "return null;}\n"
- "throw \"findAndModifyFailed failed: \" + tojson( ret.errmsg );}\n"
- "return ret.value;}\n"
- "DBCollection.prototype.renameCollection = function( newName , dropTarget ){\n"
- "return this._db._adminCommand( { renameCollection : this._fullName ,\n"
- "to : this._db._name + \".\" + newName ,\n"
- "dropTarget : dropTarget } )}\n"
- "DBCollection.prototype.validate = function() {\n"
- "var res = this._db.runCommand( { validate: this.getName() } );\n"
- "res.valid = false;\n"
- "var raw = res.result || res.raw;\n"
- "if ( raw ){\n"
- "var str = \"-\" + tojson( raw );\n"
- "res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );\n"
- "var p = /lastExtentSize:(\\d+)/;\n"
- "var r = p.exec( str );\n"
- "if ( r ){\n"
- "res.lastExtentSize = Number( r[1] );}}\n"
- "return res;}\n"
- "DBCollection.prototype.getShardVersion = function(){\n"
- "return this._db._adminCommand( { getShardVersion : this._fullName } );}\n"
- "DBCollection.prototype.getIndexes = function(){\n"
- "return this.getDB().getCollection( \"system.indexes\" ).find( { ns : this.getFullName() } ).toArray();}\n"
- "DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;\n"
- "DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;\n"
- "DBCollection.prototype.getIndexKeys = function(){\n"
- "return this.getIndexes().map(\n"
- "function(i){\n"
- "return i.key;}\n"
- ");}\n"
- "DBCollection.prototype.count = function( x ){\n"
- "return this.find( x ).count();}\n"
- "\n"
- "DBCollection.prototype.clean = function() {\n"
- "return this._dbCommand( { clean: this.getName() } );}\n"
- "\n"
- "DBCollection.prototype.dropIndex = function(index) {\n"
- "assert(index , \"need to specify index to dropIndex\" );\n"
- "if ( ! isString( index ) && isObject( index ) )\n"
- "index = this._genIndexName( index );\n"
- "var res = this._dbCommand( \"deleteIndexes\" ,{ index: index } );\n"
- "this.resetIndexCache();\n"
- "return res;}\n"
- "DBCollection.prototype.copyTo = function( newName ){\n"
- "return this.getDB().eval(\n"
- "function( collName , newName ){\n"
- "var from = db[collName];\n"
- "var to = db[newName];\n"
- "to.ensureIndex( { _id : 1 } );\n"
- "var count = 0;\n"
- "var cursor = from.find();\n"
- "while ( cursor.hasNext() ){\n"
- "var o = cursor.next();\n"
- "count++;\n"
- "to.save( o );}\n"
- "return count;\n"
- "} , this.getName() , newName\n"
- ");}\n"
- "DBCollection.prototype.getCollection = function( subName ){\n"
- "return this._db.getCollection( this._shortName + \".\" + subName );}\n"
- "DBCollection.prototype.stats = function( scale ){\n"
- "return this._db.runCommand( { collstats : this._shortName , scale : scale } );}\n"
- "DBCollection.prototype.dataSize = function(){\n"
- "return this.stats().size;}\n"
- "DBCollection.prototype.storageSize = function(){\n"
- "return this.stats().storageSize;}\n"
- "DBCollection.prototype.totalIndexSize = function( verbose ){\n"
- "var stats = this.stats();\n"
- "if (verbose){\n"
- "for (var ns in stats.indexSizes){\n"
- "print( ns + \"\\t\" + stats.indexSizes[ns] );}}\n"
- "return stats.totalIndexSize;}\n"
- "DBCollection.prototype.totalSize = function(){\n"
- "var total = this.storageSize();\n"
- "var mydb = this._db;\n"
- "var shortName = this._shortName;\n"
- "this.getIndexes().forEach(\n"
- "function( spec ){\n"
- "var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
- "var mysize = coll.storageSize();\n"
- "//print( coll + \"\\t\" + mysize + \"\\t\" + tojson( coll.validate() ) );\n"
- "total += coll.dataSize();}\n"
- ");\n"
- "return total;}\n"
- "DBCollection.prototype.convertToCapped = function( bytes ){\n"
- "if ( ! bytes )\n"
- "throw \"have to specify # of bytes\";\n"
- "return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )}\n"
- "DBCollection.prototype.exists = function(){\n"
- "return this._db.system.namespaces.findOne( { name : this._fullName } );}\n"
- "DBCollection.prototype.isCapped = function(){\n"
- "var e = this.exists();\n"
- "return ( e && e.options && e.options.capped ) ? true : false;}\n"
- "DBCollection.prototype.distinct = function( keyString , query ){\n"
- "var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
- "if ( ! res.ok )\n"
- "throw \"distinct failed: \" + tojson( res );\n"
- "return res.values;}\n"
- "DBCollection.prototype.group = function( params ){\n"
- "params.ns = this._shortName;\n"
- "return this._db.group( params );}\n"
- "DBCollection.prototype.groupcmd = function( params ){\n"
- "params.ns = this._shortName;\n"
- "return this._db.groupcmd( params );}\n"
- "MapReduceResult = function( db , o ){\n"
- "Object.extend( this , o );\n"
- "this._o = o;\n"
- "this._keys = Object.keySet( o );\n"
- "this._db = db;\n"
- "this._coll = this._db.getCollection( this.result );}\n"
- "MapReduceResult.prototype._simpleKeys = function(){\n"
- "return this._o;}\n"
- "MapReduceResult.prototype.find = function(){\n"
- "return DBCollection.prototype.find.apply( this._coll , arguments );}\n"
- "MapReduceResult.prototype.drop = function(){\n"
- "return this._coll.drop();}\n"
- "\n"
- "MapReduceResult.prototype.convertToSingleObject = function(){\n"
- "var z = {};\n"
- "this._coll.find().forEach( function(a){ z[a._id] = a.value; } );\n"
- "return z;}\n"
- "\n"
- "DBCollection.prototype.mapReduce = function( map , reduce , optional ){\n"
- "var c = { mapreduce : this._shortName , map : map , reduce : reduce };\n"
- "if ( optional )\n"
- "Object.extend( c , optional );\n"
- "var raw = this._db.runCommand( c );\n"
- "if ( ! raw.ok )\n"
- "throw \"map reduce failed: \" + tojson( raw );\n"
- "return new MapReduceResult( this._db , raw );}\n"
- "DBCollection.prototype.toString = function(){\n"
- "return this.getFullName();}\n"
- "DBCollection.prototype.toString = function(){\n"
- "return this.getFullName();}\n"
- "DBCollection.prototype.tojson = DBCollection.prototype.toString;\n"
- "DBCollection.prototype.shellPrint = DBCollection.prototype.toString;\n"
- ;
-
+const char * jsconcatcode =
+"__quiet = false;\n"
+ "__magicNoPrint = { __magicNoPrint : 1111 }\n"
+ "chatty = function(s){\n"
+ "if ( ! __quiet )\n"
+ "print( s );}\n"
+ "friendlyEqual = function( a , b ){\n"
+ "if ( a == b )\n"
+ "return true;\n"
+ "if ( tojson( a ) == tojson( b ) )\n"
+ "return true;\n"
+ "return false;}\n"
+ "doassert = function (msg) {\n"
+ "if (msg.indexOf(\"assert\") == 0)\n"
+ "print(msg);\n"
+ "else\n"
+ "print(\"assert: \" + msg);\n"
+ "throw msg;}\n"
+ "assert = function( b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( b )\n"
+ "return;\n"
+ "doassert( msg == undefined ? \"assert failed\" : \"assert failed : \" + msg );}\n"
+ "assert.automsg = function( b ) {\n"
+ "assert( eval( b ), b );}\n"
+ "assert._debug = false;\n"
+ "assert.eq = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a == b )\n"
+ "return;\n"
+ "if ( ( a != null && b != null ) && friendlyEqual( a , b ) )\n"
+ "return;\n"
+ "doassert( \"[\" + tojson( a ) + \"] != [\" + tojson( b ) + \"] are not equal : \" + msg );}\n"
+ "assert.eq.automsg = function( a, b ) {\n"
+ "assert.eq( eval( a ), eval( b ), \"[\" + a + \"] != [\" + b + \"]\" );}\n"
+ "assert.neq = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a != b )\n"
+ "return;\n"
+ "doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );}\n"
+ "assert.repeat = function( f, msg, timeout, interval ) {\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "var start = new Date();\n"
+ "timeout = timeout || 30000;\n"
+ "interval = interval || 200;\n"
+ "var last;\n"
+ "while( 1 ) {\n"
+ "if ( typeof( f ) == \"string\" ){\n"
+ "if ( eval( f ) )\n"
+ "return;}\n"
+ "else {\n"
+ "if ( f() )\n"
+ "return;}\n"
+ "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
+ "break;\n"
+ "sleep( interval );}}\n"
+ "assert.soon = function( f, msg, timeout, interval ) {\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "var start = new Date();\n"
+ "timeout = timeout || 30000;\n"
+ "interval = interval || 200;\n"
+ "var last;\n"
+ "while( 1 ) {\n"
+ "if ( typeof( f ) == \"string\" ){\n"
+ "if ( eval( f ) )\n"
+ "return;}\n"
+ "else {\n"
+ "if ( f() )\n"
+ "return;}\n"
+ "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
+ "doassert( \"assert.soon failed: \" + f + \", msg:\" + msg );\n"
+ "sleep( interval );}}\n"
+ "assert.throws = function( func , params , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "try {\n"
+ "func.apply( null , params );}\n"
+ "catch ( e ){\n"
+ "return e;}\n"
+ "doassert( \"did not throw exception: \" + msg );}\n"
+ "assert.throws.automsg = function( func, params ) {\n"
+ "assert.throws( func, params, func.toString() );}\n"
+ "assert.commandWorked = function( res , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( res.ok == 1 )\n"
+ "return;\n"
+ "doassert( \"command failed: \" + tojson( res ) + \" : \" + msg );}\n"
+ "assert.commandFailed = function( res , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( res.ok == 0 )\n"
+ "return;\n"
+ "doassert( \"command worked when it should have failed: \" + tojson( res ) + \" : \" + msg );}\n"
+ "assert.isnull = function( what , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( what == null )\n"
+ "return;\n"
+ "doassert( \"supposed to null (\" + ( msg || \"\" ) + \") was: \" + tojson( what ) );}\n"
+ "assert.lt = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a < b )\n"
+ "return;\n"
+ "doassert( a + \" is not less than \" + b + \" : \" + msg );}\n"
+ "assert.gt = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a > b )\n"
+ "return;\n"
+ "doassert( a + \" is not greater than \" + b + \" : \" + msg );}\n"
+ "assert.lte = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a <= b )\n"
+ "return;\n"
+ "doassert( a + \" is not less than or eq \" + b + \" : \" + msg );}\n"
+ "assert.gte = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a >= b )\n"
+ "return;\n"
+ "doassert( a + \" is not greater than or eq \" + b + \" : \" + msg );}\n"
+ "assert.close = function( a , b , msg , places ){\n"
+ "if (places === undefined) {\n"
+ "places = 4;}\n"
+ "if (Math.round((a - b) * Math.pow(10, places)) === 0) {\n"
+ "return;}\n"
+ "doassert( a + \" is not equal to \" + b + \" within \" + places +\n"
+ "\" places, diff: \" + (a-b) + \" : \" + msg );\n"
+ "};\n"
+ "Object.extend = function( dst , src , deep ){\n"
+ "for ( var k in src ){\n"
+ "var v = src[k];\n"
+ "if ( deep && typeof(v) == \"object\" ){\n"
+ "v = Object.extend( typeof ( v.length ) == \"number\" ? [] : {} , v , true );}\n"
+ "dst[k] = v;}\n"
+ "return dst;}\n"
+ "argumentsToArray = function( a ){\n"
+ "var arr = [];\n"
+ "for ( var i=0; i<a.length; i++ )\n"
+ "arr[i] = a[i];\n"
+ "return arr;}\n"
+ "isString = function( x ){\n"
+ "return typeof( x ) == \"string\";}\n"
+ "isNumber = function(x){\n"
+ "return typeof( x ) == \"number\";}\n"
+ "isObject = function( x ){\n"
+ "return typeof( x ) == \"object\";}\n"
+ "String.prototype.trim = function() {\n"
+ "return this.replace(/^\\s+|\\s+$/g,\"\");}\n"
+ "String.prototype.ltrim = function() {\n"
+ "return this.replace(/^\\s+/,\"\");}\n"
+ "String.prototype.rtrim = function() {\n"
+ "return this.replace(/\\s+$/,\"\");}\n"
+ "Date.timeFunc = function( theFunc , numTimes ){\n"
+ "var start = new Date();\n"
+ "numTimes = numTimes || 1;\n"
+ "for ( var i=0; i<numTimes; i++ ){\n"
+ "theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );}\n"
+ "return (new Date()).getTime() - start.getTime();}\n"
+ "Date.prototype.tojson = function(){\n"
+ "return \"\\\"\" + this.toString() + \"\\\"\";}\n"
+ "RegExp.prototype.tojson = RegExp.prototype.toString;\n"
+ "Array.contains = function( a , x ){\n"
+ "for ( var i=0; i<a.length; i++ ){\n"
+ "if ( a[i] == x )\n"
+ "return true;}\n"
+ "return false;}\n"
+ "Array.unique = function( a ){\n"
+ "var u = [];\n"
+ "for ( var i=0; i<a.length; i++){\n"
+ "var o = a[i];\n"
+ "if ( ! Array.contains( u , o ) ){\n"
+ "u.push( o );}}\n"
+ "return u;}\n"
+ "Array.shuffle = function( arr ){\n"
+ "for ( var i=0; i<arr.length-1; i++ ){\n"
+ "var pos = i+Random.randInt(arr.length-i);\n"
+ "var save = arr[i];\n"
+ "arr[i] = arr[pos];\n"
+ "arr[pos] = save;}\n"
+ "return arr;}\n"
+ "Array.tojson = function( a , indent ){\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "if (a.length == 0) {\n"
+ "return \"[ ]\";}\n"
+ "var s = \"[\\n\";\n"
+ "indent += \"\\t\";\n"
+ "for ( var i=0; i<a.length; i++){\n"
+ "s += indent + tojson( a[i], indent );\n"
+ "if ( i < a.length - 1 ){\n"
+ "s += \",\\n\";}}\n"
+ "if ( a.length == 0 ) {\n"
+ "s += indent;}\n"
+ "indent = indent.substring(1);\n"
+ "s += \"\\n\"+indent+\"]\";\n"
+ "return s;}\n"
+ "Array.fetchRefs = function( arr , coll ){\n"
+ "var n = [];\n"
+ "for ( var i=0; i<arr.length; i ++){\n"
+ "var z = arr[i];\n"
+ "if ( coll && coll != z.getCollection() )\n"
+ "continue;\n"
+ "n.push( z.fetch() );}\n"
+ "return n;}\n"
+ "Array.sum = function( arr ){\n"
+ "if ( arr.length == 0 )\n"
+ "return null;\n"
+ "var s = arr[0];\n"
+ "for ( var i=1; i<arr.length; i++ )\n"
+ "s += arr[i];\n"
+ "return s;}\n"
+ "Array.avg = function( arr ){\n"
+ "if ( arr.length == 0 )\n"
+ "return null;\n"
+ "return Array.sum( arr ) / arr.length;}\n"
+ "Array.stdDev = function( arr ){\n"
+ "var avg = Array.avg( arr );\n"
+ "var sum = 0;\n"
+ "for ( var i=0; i<arr.length; i++ ){\n"
+ "sum += Math.pow( arr[i] - avg , 2 );}\n"
+ "return Math.sqrt( sum / arr.length );}\n"
+ "Object.keySet = function( o ) {\n"
+ "var ret = new Array();\n"
+ "for( i in o ) {\n"
+ "if ( !( i in o.__proto__ && o[ i ] === o.__proto__[ i ] ) ) {\n"
+ "ret.push( i );}}\n"
+ "return ret;}\n"
+ "if ( ! NumberLong.prototype ) {\n"
+ "NumberLong.prototype = {}}\n"
+ "NumberLong.prototype.tojson = function() {\n"
+ "return this.toString();}\n"
+ "if ( ! ObjectId.prototype )\n"
+ "ObjectId.prototype = {}\n"
+ "ObjectId.prototype.toString = function(){\n"
+ "return this.str;}\n"
+ "ObjectId.prototype.tojson = function(){\n"
+ "return \"ObjectId(\\\"\" + this.str + \"\\\")\";}\n"
+ "ObjectId.prototype.isObjectId = true;\n"
+ "ObjectId.prototype.getTimestamp = function(){\n"
+ "return new Date(parseInt(this.toString().slice(0,8), 16)*1000);}\n"
+ "ObjectId.prototype.equals = function( other){\n"
+ "return this.str == other.str;}\n"
+ "if ( typeof( DBPointer ) != \"undefined\" ){\n"
+ "DBPointer.prototype.fetch = function(){\n"
+ "assert( this.ns , \"need a ns\" );\n"
+ "assert( this.id , \"need an id\" );\n"
+ "return db[ this.ns ].findOne( { _id : this.id } );}\n"
+ "DBPointer.prototype.tojson = function(indent){\n"
+ "return tojson({\"ns\" : this.ns, \"id\" : this.id}, indent);}\n"
+ "DBPointer.prototype.getCollection = function(){\n"
+ "return this.ns;}\n"
+ "DBPointer.prototype.toString = function(){\n"
+ "return \"DBPointer \" + this.ns + \":\" + this.id;}}\n"
+ "else {\n"
+ "print( \"warning: no DBPointer\" );}\n"
+ "if ( typeof( DBRef ) != \"undefined\" ){\n"
+ "DBRef.prototype.fetch = function(){\n"
+ "assert( this.$ref , \"need a ns\" );\n"
+ "assert( this.$id , \"need an id\" );\n"
+ "return db[ this.$ref ].findOne( { _id : this.$id } );}\n"
+ "DBRef.prototype.tojson = function(indent){\n"
+ "return tojson({\"$ref\" : this.$ref, \"$id\" : this.$id}, indent);}\n"
+ "DBRef.prototype.getCollection = function(){\n"
+ "return this.$ref;}\n"
+ "DBRef.prototype.toString = function(){\n"
+ "return this.tojson();}}\n"
+ "else {\n"
+ "print( \"warning: no DBRef\" );}\n"
+ "if ( typeof( BinData ) != \"undefined\" ){\n"
+ "BinData.prototype.tojson = function () {\n"
+ "//return \"BinData type: \" + this.type + \" len: \" + this.len;\n"
+ "return this.toString();}}\n"
+ "else {\n"
+ "print( \"warning: no BinData class\" );}\n"
+ "if ( typeof( UUID ) != \"undefined\" ){\n"
+ "UUID.prototype.tojson = function () {\n"
+ "return this.toString();}}\n"
+ "if ( typeof _threadInject != \"undefined\" ){\n"
+ "print( \"fork() available!\" );\n"
+ "Thread = function(){\n"
+ "this.init.apply( this, arguments );}\n"
+ "_threadInject( Thread.prototype );\n"
+ "ScopedThread = function() {\n"
+ "this.init.apply( this, arguments );}\n"
+ "ScopedThread.prototype = new Thread( function() {} );\n"
+ "_scopedThreadInject( ScopedThread.prototype );\n"
+ "fork = function() {\n"
+ "var t = new Thread( function() {} );\n"
+ "Thread.apply( t, arguments );\n"
+ "return t;}\n"
+ "EventGenerator = function( me, collectionName, mean ) {\n"
+ "this.mean = mean;\n"
+ "this.events = new Array( me, collectionName );}\n"
+ "EventGenerator.prototype._add = function( action ) {\n"
+ "this.events.push( [ Random.genExp( this.mean ), action ] );}\n"
+ "EventGenerator.prototype.addInsert = function( obj ) {\n"
+ "this._add( \"t.insert( \" + tojson( obj ) + \" )\" );}\n"
+ "EventGenerator.prototype.addRemove = function( obj ) {\n"
+ "this._add( \"t.remove( \" + tojson( obj ) + \" )\" );}\n"
+ "EventGenerator.prototype.addUpdate = function( objOld, objNew ) {\n"
+ "this._add( \"t.update( \" + tojson( objOld ) + \", \" + tojson( objNew ) + \" )\" );}\n"
+ "EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {\n"
+ "query = query || {};\n"
+ "shouldPrint = shouldPrint || false;\n"
+ "checkQuery = checkQuery || false;\n"
+ "var action = \"assert.eq( \" + count + \", t.count( \" + tojson( query ) + \" ) );\"\n"
+ "if ( checkQuery ) {\n"
+ "action += \" assert.eq( \" + count + \", t.find( \" + tojson( query ) + \" ).toArray().length );\"}\n"
+ "if ( shouldPrint ) {\n"
+ "action += \" print( me + ' ' + \" + count + \" );\";}\n"
+ "this._add( action );}\n"
+ "EventGenerator.prototype.getEvents = function() {\n"
+ "return this.events;}\n"
+ "EventGenerator.dispatch = function() {\n"
+ "var args = argumentsToArray( arguments );\n"
+ "var me = args.shift();\n"
+ "var collectionName = args.shift();\n"
+ "var m = new Mongo( db.getMongo().host );\n"
+ "var t = m.getDB( \"test\" )[ collectionName ];\n"
+ "for( var i in args ) {\n"
+ "sleep( args[ i ][ 0 ] );\n"
+ "eval( args[ i ][ 1 ] );}}\n"
+ "ParallelTester = function() {\n"
+ "this.params = new Array();}\n"
+ "ParallelTester.prototype.add = function( fun, args ) {\n"
+ "args = args || [];\n"
+ "args.unshift( fun );\n"
+ "this.params.push( args );}\n"
+ "ParallelTester.prototype.run = function( msg, newScopes ) {\n"
+ "newScopes = newScopes || false;\n"
+ "assert.parallelTests( this.params, msg, newScopes );}\n"
+ "ParallelTester.createJstestsLists = function( n ) {\n"
+ "var params = new Array();\n"
+ "for( var i = 0; i < n; ++i ) {\n"
+ "params.push( [] );}\n"
+ "var makeKeys = function( a ) {\n"
+ "var ret = {};\n"
+ "for( var i in a ) {\n"
+ "ret[ a[ i ] ] = 1;}\n"
+ "return ret;}\n"
+ "var skipTests = makeKeys( [ \"jstests/dbadmin.js\",\n"
+ "\"jstests/repair.js\",\n"
+ "\"jstests/cursor8.js\",\n"
+ "\"jstests/recstore.js\",\n"
+ "\"jstests/extent.js\",\n"
+ "\"jstests/indexb.js\",\n"
+ "\"jstests/profile1.js\",\n"
+ "\"jstests/mr3.js\",\n"
+ "\"jstests/indexh.js\",\n"
+ "\"jstests/apitest_db.js\",\n"
+ "\"jstests/evalb.js\"] );\n"
+ "var serialTestsArr = [ \"jstests/fsync.js\",\n"
+ "\"jstests/fsync2.js\" ];\n"
+ "var serialTests = makeKeys( serialTestsArr );\n"
+ "params[ 0 ] = serialTestsArr;\n"
+ "var files = listFiles(\"jstests\");\n"
+ "files = Array.shuffle( files );\n"
+ "var i = 0;\n"
+ "files.forEach(\n"
+ "function(x) {\n"
+ "if ( ( /[\\/\\\\]_/.test(x.name) ) ||\n"
+ "( ! /\\.js$/.test(x.name ) ) ||\n"
+ "( x.name in skipTests ) ||\n"
+ "( x.name in serialTests ) ||\n"
+ "! /\\.js$/.test(x.name ) ){\n"
+ "print(\" >>>>>>>>>>>>>>> skipping \" + x.name);\n"
+ "return;}\n"
+ "params[ i % n ].push( x.name );\n"
+ "++i;}\n"
+ ");\n"
+ "params[ 0 ] = Array.shuffle( params[ 0 ] );\n"
+ "for( var i in params ) {\n"
+ "params[ i ].unshift( i );}\n"
+ "return params;}\n"
+ "ParallelTester.fileTester = function() {\n"
+ "var args = argumentsToArray( arguments );\n"
+ "var suite = args.shift();\n"
+ "args.forEach(\n"
+ "function( x ) {\n"
+ "print(\" S\" + suite + \" Test : \" + x + \" ...\");\n"
+ "var time = Date.timeFunc( function() { load(x); }, 1);\n"
+ "print(\" S\" + suite + \" Test : \" + x + \" \" + time + \"ms\" );}\n"
+ ");}\n"
+ "assert.parallelTests = function( params, msg, newScopes ) {\n"
+ "newScopes = newScopes || false;\n"
+ "var wrapper = function( fun, argv ) {\n"
+ "eval (\n"
+ "\"var z = function() {\" +\n"
+ "\"var __parallelTests__fun = \" + fun.toString() + \";\" +\n"
+ "\"var __parallelTests__argv = \" + tojson( argv ) + \";\" +\n"
+ "\"var __parallelTests__passed = false;\" +\n"
+ "\"try {\" +\n"
+ "\"__parallelTests__fun.apply( 0, __parallelTests__argv );\" +\n"
+ "\"__parallelTests__passed = true;\" +\n"
+ "\"} catch ( e ) {\" +\n"
+ "\"print( e );\" +\n"
+ "\"}\" +\n"
+ "\"return __parallelTests__passed;\" +\n"
+ "\"}\"\n"
+ ");\n"
+ "return z;}\n"
+ "var runners = new Array();\n"
+ "for( var i in params ) {\n"
+ "var param = params[ i ];\n"
+ "var test = param.shift();\n"
+ "var t;\n"
+ "if ( newScopes )\n"
+ "t = new ScopedThread( wrapper( test, param ) );\n"
+ "else\n"
+ "t = new Thread( wrapper( test, param ) );\n"
+ "runners.push( t );}\n"
+ "runners.forEach( function( x ) { x.start(); } );\n"
+ "var nFailed = 0;\n"
+ "runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );\n"
+ "assert.eq( 0, nFailed, msg );}}\n"
+ "tojsononeline = function( x ){\n"
+ "return tojson( x , \" \" , true );}\n"
+ "tojson = function( x, indent , nolint ){\n"
+ "if ( x === null )\n"
+ "return \"null\";\n"
+ "if ( x === undefined )\n"
+ "return \"undefined\";\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "switch ( typeof x ) {\n"
+ "case \"string\": {\n"
+ "var s = \"\\\"\";\n"
+ "for ( var i=0; i<x.length; i++ ){\n"
+ "switch (x[i]){\n"
+ "case '\"': s += '\\\\\"'; break;\n"
+ "case '\\\\': s += '\\\\\\\\'; break;\n"
+ "case '\\b': s += '\\\\b'; break;\n"
+ "case '\\f': s += '\\\\f'; break;\n"
+ "case '\\n': s += '\\\\n'; break;\n"
+ "case '\\r': s += '\\\\r'; break;\n"
+ "case '\\t': s += '\\\\t'; break;\n"
+ "default: {\n"
+ "var code = x.charCodeAt(i);\n"
+ "if (code < 0x20){\n"
+ "s += (code < 0x10 ? '\\\\u000' : '\\\\u00') + code.toString(16);\n"
+ "} else {\n"
+ "s += x[i];}}}}\n"
+ "return s + \"\\\"\";}\n"
+ "case \"number\":\n"
+ "case \"boolean\":\n"
+ "return \"\" + x;\n"
+ "case \"object\":{\n"
+ "var s = tojsonObject( x, indent , nolint );\n"
+ "if ( ( nolint == null || nolint == true ) && s.length < 80 && ( indent == null || indent.length == 0 ) ){\n"
+ "s = s.replace( /[\\s\\r\\n ]+/gm , \" \" );}\n"
+ "return s;}\n"
+ "case \"function\":\n"
+ "return x.toString();\n"
+ "default:\n"
+ "throw \"tojson can't handle type \" + ( typeof x );}}\n"
+ "tojsonObject = function( x, indent , nolint ){\n"
+ "var lineEnding = nolint ? \" \" : \"\\n\";\n"
+ "var tabSpace = nolint ? \"\" : \"\\t\";\n"
+ "assert.eq( ( typeof x ) , \"object\" , \"tojsonObject needs object, not [\" + ( typeof x ) + \"]\" );\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "if ( typeof( x.tojson ) == \"function\" && x.tojson != tojson ) {\n"
+ "return x.tojson(indent,nolint);}\n"
+ "if ( x.constructor && typeof( x.constructor.tojson ) == \"function\" && x.constructor.tojson != tojson ) {\n"
+ "return x.constructor.tojson( x, indent , nolint );}\n"
+ "if ( x.toString() == \"[object MaxKey]\" )\n"
+ "return \"{ $maxKey : 1 }\";\n"
+ "if ( x.toString() == \"[object MinKey]\" )\n"
+ "return \"{ $minKey : 1 }\";\n"
+ "var s = \"{\" + lineEnding;\n"
+ "indent += tabSpace;\n"
+ "var total = 0;\n"
+ "for ( var k in x ) total++;\n"
+ "if ( total == 0 ) {\n"
+ "s += indent + lineEnding;}\n"
+ "var keys = x;\n"
+ "if ( typeof( x._simpleKeys ) == \"function\" )\n"
+ "keys = x._simpleKeys();\n"
+ "var num = 1;\n"
+ "for ( var k in keys ){\n"
+ "var val = x[k];\n"
+ "if ( val == DB.prototype || val == DBCollection.prototype )\n"
+ "continue;\n"
+ "s += indent + \"\\\"\" + k + \"\\\" : \" + tojson( val, indent , nolint );\n"
+ "if (num != total) {\n"
+ "s += \",\";\n"
+ "num++;}\n"
+ "s += lineEnding;}\n"
+ "indent = indent.substring(1);\n"
+ "return s + indent + \"}\";}\n"
+ "shellPrint = function( x ){\n"
+ "it = x;\n"
+ "if ( x != undefined )\n"
+ "shellPrintHelper( x );\n"
+ "if ( db ){\n"
+ "var e = db.getPrevError();\n"
+ "if ( e.err ) {\n"
+ "if( e.nPrev <= 1 )\n"
+ "print( \"error on last call: \" + tojson( e.err ) );\n"
+ "else\n"
+ "print( \"an error \" + tojson(e.err) + \" occurred \" + e.nPrev + \" operations back in the command invocation\" );}\n"
+ "db.resetError();}}\n"
+ "printjson = function(x){\n"
+ "print( tojson( x ) );}\n"
+ "printjsononeline = function(x){\n"
+ "print( tojsononeline( x ) );}\n"
+ "shellPrintHelper = function( x ){\n"
+ "if ( typeof( x ) == \"undefined\" ){\n"
+ "if ( typeof( db ) != \"undefined\" && db.getLastError ){\n"
+ "var e = db.getLastError();\n"
+ "if ( e != null )\n"
+ "print( e );}\n"
+ "return;}\n"
+ "if ( x == __magicNoPrint )\n"
+ "return;\n"
+ "if ( x == null ){\n"
+ "print( \"null\" );\n"
+ "return;}\n"
+ "if ( typeof x != \"object\" )\n"
+ "return print( x );\n"
+ "var p = x.shellPrint;\n"
+ "if ( typeof p == \"function\" )\n"
+ "return x.shellPrint();\n"
+ "var p = x.tojson;\n"
+ "if ( typeof p == \"function\" )\n"
+ "print( x.tojson() );\n"
+ "else\n"
+ "print( tojson( x ) );}\n"
+ "shellAutocomplete = function( prefix ){\n"
+ "var a = [];\n"
+ "//a.push( prefix + \"z\" )\n"
+ "//a.push( prefix + \"y\" )\n"
+ "__autocomplete__ = a;}\n"
+ "shellHelper = function( command , rest , shouldPrint ){\n"
+ "command = command.trim();\n"
+ "var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
+ "if ( ! shellHelper[command] )\n"
+ "throw \"no command [\" + command + \"]\";\n"
+ "var res = shellHelper[command].apply( null , args );\n"
+ "if ( shouldPrint ){\n"
+ "shellPrintHelper( res );}\n"
+ "return res;}\n"
+ "shellHelper.use = function( dbname ){\n"
+ "db = db.getMongo().getDB( dbname );\n"
+ "print( \"switched to db \" + db.getName() );}\n"
+ "shellHelper.it = function(){\n"
+ "if ( typeof( ___it___ ) == \"undefined\" || ___it___ == null ){\n"
+ "print( \"no cursor\" );\n"
+ "return;}\n"
+ "shellPrintHelper( ___it___ );}\n"
+ "shellHelper.show = function( what ){\n"
+ "assert( typeof what == \"string\" );\n"
+ "if( what == \"profile\" ) {\n"
+ "if( db.system.profile.count() == 0 ) {\n"
+ "print(\"db.system.profile is empty\");\n"
+ "print(\"Use db.setProfilingLevel(2) will enable profiling\");\n"
+ "print(\"Use db.system.profile.find() to show raw profile entries\");}\n"
+ "else {\n"
+ "print();\n"
+ "db.system.profile.find({ millis : { $gt : 0 } }).sort({$natural:-1}).limit(5).forEach( function(x){print(\"\"+x.millis+\"ms \" + String(x.ts).substring(0,24)); print(x.info); print(\"\\n\");} )}\n"
+ "return \"\";}\n"
+ "if ( what == \"users\" ){\n"
+ "db.system.users.find().forEach( printjson );\n"
+ "return \"\";}\n"
+ "if ( what == \"collections\" || what == \"tables\" ) {\n"
+ "db.getCollectionNames().forEach( function(x){print(x)} );\n"
+ "return \"\";}\n"
+ "if ( what == \"dbs\" ) {\n"
+ "db.getMongo().getDBNames().sort().forEach( function(x){print(x)} );\n"
+ "return \"\";}\n"
+ "throw \"don't know how to show [\" + what + \"]\";}\n"
+ "if ( typeof( Map ) == \"undefined\" ){\n"
+ "Map = function(){\n"
+ "this._data = {};}}\n"
+ "Map.hash = function( val ){\n"
+ "if ( ! val )\n"
+ "return val;\n"
+ "switch ( typeof( val ) ){\n"
+ "case 'string':\n"
+ "case 'number':\n"
+ "case 'date':\n"
+ "return val.toString();\n"
+ "case 'object':\n"
+ "case 'array':\n"
+ "var s = \"\";\n"
+ "for ( var k in val ){\n"
+ "s += k + val[k];}\n"
+ "return s;}\n"
+ "throw \"can't hash : \" + typeof( val );}\n"
+ "Map.prototype.put = function( key , value ){\n"
+ "var o = this._get( key );\n"
+ "var old = o.value;\n"
+ "o.value = value;\n"
+ "return old;}\n"
+ "Map.prototype.get = function( key ){\n"
+ "return this._get( key ).value;}\n"
+ "Map.prototype._get = function( key ){\n"
+ "var h = Map.hash( key );\n"
+ "var a = this._data[h];\n"
+ "if ( ! a ){\n"
+ "a = [];\n"
+ "this._data[h] = a;}\n"
+ "for ( var i=0; i<a.length; i++ ){\n"
+ "if ( friendlyEqual( key , a[i].key ) ){\n"
+ "return a[i];}}\n"
+ "var o = { key : key , value : null };\n"
+ "a.push( o );\n"
+ "return o;}\n"
+ "Map.prototype.values = function(){\n"
+ "var all = [];\n"
+ "for ( var k in this._data ){\n"
+ "this._data[k].forEach( function(z){ all.push( z.value ); } );}\n"
+ "return all;}\n"
+ "if ( typeof( gc ) == \"undefined\" ){\n"
+ "gc = function(){\n"
+ "print( \"warning: using noop gc()\" );}}\n"
+ "Math.sigFig = function( x , N ){\n"
+ "if ( ! N ){\n"
+ "N = 3;}\n"
+ "var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );\n"
+ "return Math.round(x*p)/p;}\n"
+ "Random = function() {}\n"
+ "Random.srand = function( s ) { _srand( s ); }\n"
+ "Random.rand = function() { return _rand(); }\n"
+ "Random.randInt = function( n ) { return Math.floor( Random.rand() * n ); }\n"
+ "Random.setRandomSeed = function( s ) {\n"
+ "s = s || new Date().getTime();\n"
+ "print( \"setting random seed: \" + s );\n"
+ "Random.srand( s );}\n"
+ "Random.genExp = function( mean ) {\n"
+ "return -Math.log( Random.rand() ) * mean;}\n"
+ "killWithUris = function( uris ) {\n"
+ "var inprog = db.currentOp().inprog;\n"
+ "for( var u in uris ) {\n"
+ "for ( var i in inprog ) {\n"
+ "if ( uris[ u ] == inprog[ i ].client ) {\n"
+ "db.killOp( inprog[ i ].opid );}}}}\n"
+ "Geo = {};\n"
+ "Geo.distance = function( a , b ){\n"
+ "var ax = null;\n"
+ "var ay = null;\n"
+ "var bx = null;\n"
+ "var by = null;\n"
+ "for ( var key in a ){\n"
+ "if ( ax == null )\n"
+ "ax = a[key];\n"
+ "else if ( ay == null )\n"
+ "ay = a[key];}\n"
+ "for ( var key in b ){\n"
+ "if ( bx == null )\n"
+ "bx = b[key];\n"
+ "else if ( by == null )\n"
+ "by = b[key];}\n"
+ "return Math.sqrt( Math.pow( by - ay , 2 ) +\n"
+ "Math.pow( bx - ax , 2 ) );}\n"
+ "rs = function () { return \"try rs.help()\"; }\n"
+ "rs.help = function () {\n"
+ "print(\"\\trs.status() { replSetGetStatus : 1 } checks repl set status\");\n"
+ "print(\"\\trs.initiate() { replSetInitiate : null } initiates set with default settings\");\n"
+ "print(\"\\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg\");\n"
+ "print(\"\\trs.add(hostportstr) add a new member to the set with default attributes\");\n"
+ "print(\"\\trs.add(membercfgobj) add a new member to the set with extra attributes\");\n"
+ "print(\"\\trs.addArb(hostportstr) add a new member which is arbiterOnly:true\");\n"
+ "print(\"\\trs.stepDown() step down as primary (momentarily)\");\n"
+ "print(\"\\trs.conf() return configuration from local.system.replset\");\n"
+ "print(\"\\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()\");\n"
+ "print();\n"
+ "print(\"\\tdb.isMaster() check who is primary\");\n"
+ "print();\n"
+ "print(\"\\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info\");}\n"
+ "rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }\n"
+ "rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
+ "rs.isMaster = function () { return db.isMaster(); }\n"
+ "rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
+ "rs.add = function (hostport, arb) {\n"
+ "var cfg = hostport;\n"
+ "var local = db.getSisterDB(\"local\");\n"
+ "assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
+ "var c = local.system.replset.findOne();\n"
+ "assert(c, \"no config object retrievable from local.system.replset\");\n"
+ "c.version++;\n"
+ "var max = 0;\n"
+ "for (var i in c.members)\n"
+ "if (c.members[i]._id > max) max = c.members[i]._id;\n"
+ "if (isString(hostport)) {\n"
+ "cfg = { _id: max + 1, host: hostport };\n"
+ "if (arb)\n"
+ "cfg.arbiterOnly = true;}\n"
+ "c.members.push(cfg);\n"
+ "return db._adminCommand({ replSetReconfig: c });}\n"
+ "rs.stepDown = function () { return db._adminCommand({ replSetStepDown:true}); }\n"
+ "rs.addArb = function (hn) { return this.add(hn, true); }\n"
+ "rs.conf = function () { return db.getSisterDB(\"local\").system.replset.findOne(); }\n"
+ "help = shellHelper.help = function (x) {\n"
+ "if (x == \"connect\") {\n"
+ "print(\"\\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.\");\n"
+ "print(\"Additional connections may be opened:\\n\");\n"
+ "print(\" var x = new Mongo('host[:port]');\");\n"
+ "print(\" var mydb = x.getDB('mydb');\");\n"
+ "print(\" or\");\n"
+ "print(\" var mydb = connect('host[:port]/mydb');\");\n"
+ "print(\"\\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\\n\");\n"
+ "return;}\n"
+ "if (x == \"misc\") {\n"
+ "print(\"\\tb = new BinData(subtype,base64str) create a BSON BinData value\");\n"
+ "print(\"\\tb.subtype() the BinData subtype (0..255)\");\n"
+ "print(\"\\tb.length() length of the BinData data in bytes\");\n"
+ "print(\"\\tb.hex() the data as a hex encoded string\");\n"
+ "print(\"\\tb.base64() the data as a base 64 encoded string\");\n"
+ "print(\"\\tb.toString()\");\n"
+ "return;}\n"
+ "if (x == \"admin\") {\n"
+ "print(\"\\tls([path]) list files\");\n"
+ "print(\"\\tpwd() returns current directory\");\n"
+ "print(\"\\tlistFiles([path]) returns file list\");\n"
+ "print(\"\\thostname() returns name of this host\");\n"
+ "print(\"\\tcat(fname) returns contents of text file as a string\");\n"
+ "print(\"\\tremoveFile(f) delete a file\");\n"
+ "print(\"\\tload(jsfilename) load and execute a .js file\");\n"
+ "print(\"\\trun(program[, args...]) spawn a program and wait for its completion\");\n"
+ "print(\"\\tsleep(m) sleep m milliseconds\");\n"
+ "print(\"\\tgetMemInfo() diagnostic\");\n"
+ "return;}\n"
+ "if (x == \"test\") {\n"
+ "print(\"\\tstartMongodEmpty(args) DELETES DATA DIR and then starts mongod\");\n"
+ "print(\"\\t returns a connection to the new server\");\n"
+ "print(\"\\tstartMongodTest() DELETES DATA DIR\");\n"
+ "print(\"\\t automatically picks port #s starting at 27000 and increasing\");\n"
+ "print(\"\\t or you can specify the port as the first arg\");\n"
+ "print(\"\\t dir is /data/db/<port>/ if not specified as the 2nd arg\");\n"
+ "print(\"\\t returns a connection to the new server\");\n"
+ "return;}\n"
+ "print(\"\\t\" + \"db.help() help on db methods\");\n"
+ "print(\"\\t\" + \"db.mycoll.help() help on collection methods\");\n"
+ "print(\"\\t\" + \"rs.help() help on replica set methods\");\n"
+ "print(\"\\t\" + \"help connect connecting to a db help\");\n"
+ "print(\"\\t\" + \"help admin administrative help\");\n"
+ "print(\"\\t\" + \"help misc misc things to know\");\n"
+ "print();\n"
+ "print(\"\\t\" + \"show dbs show database names\");\n"
+ "print(\"\\t\" + \"show collections show collections in current database\");\n"
+ "print(\"\\t\" + \"show users show users in current database\");\n"
+ "print(\"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
+ "print(\"\\t\" + \"use <db_name> set current database\");\n"
+ "print(\"\\t\" + \"db.foo.find() list objects in collection foo\");\n"
+ "print(\"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\");\n"
+ "print(\"\\t\" + \"it result of the last line evaluated; use to further iterate\");\n"
+ "print(\"\\t\" + \"exit quit the mongo shell\");}\n"
+ "if ( typeof DB == \"undefined\" ){\n"
+ "DB = function( mongo , name ){\n"
+ "this._mongo = mongo;\n"
+ "this._name = name;}}\n"
+ "DB.prototype.getMongo = function(){\n"
+ "assert( this._mongo , \"why no mongo!\" );\n"
+ "return this._mongo;}\n"
+ "DB.prototype.getSisterDB = function( name ){\n"
+ "return this.getMongo().getDB( name );}\n"
+ "DB.prototype.getName = function(){\n"
+ "return this._name;}\n"
+ "DB.prototype.stats = function(){\n"
+ "return this.runCommand( { dbstats : 1 } );}\n"
+ "DB.prototype.getCollection = function( name ){\n"
+ "return new DBCollection( this._mongo , this , name , this._name + \".\" + name );}\n"
+ "DB.prototype.commandHelp = function( name ){\n"
+ "var c = {};\n"
+ "c[name] = 1;\n"
+ "c.help = true;\n"
+ "return this.runCommand( c ).help;}\n"
+ "DB.prototype.runCommand = function( obj ){\n"
+ "if ( typeof( obj ) == \"string\" ){\n"
+ "var n = {};\n"
+ "n[obj] = 1;\n"
+ "obj = n;}\n"
+ "return this.getCollection( \"$cmd\" ).findOne( obj );}\n"
+ "DB.prototype._dbCommand = DB.prototype.runCommand;\n"
+ "DB.prototype._adminCommand = function( obj ){\n"
+ "if ( this._name == \"admin\" )\n"
+ "return this.runCommand( obj );\n"
+ "return this.getSisterDB( \"admin\" ).runCommand( obj );}\n"
+ "DB.prototype.addUser = function( username , pass, readOnly ){\n"
+ "readOnly = readOnly || false;\n"
+ "var c = this.getCollection( \"system.users\" );\n"
+ "var u = c.findOne( { user : username } ) || { user : username };\n"
+ "u.readOnly = readOnly;\n"
+ "u.pwd = hex_md5( username + \":mongo:\" + pass );\n"
+ "print( tojson( u ) );\n"
+ "c.save( u );}\n"
+ "DB.prototype.removeUser = function( username ){\n"
+ "this.getCollection( \"system.users\" ).remove( { user : username } );}\n"
+ "DB.prototype.__pwHash = function( nonce, username, pass ) {\n"
+ "return hex_md5( nonce + username + hex_md5( username + \":mongo:\" + pass ) );}\n"
+ "DB.prototype.auth = function( username , pass ){\n"
+ "var n = this.runCommand( { getnonce : 1 } );\n"
+ "var a = this.runCommand(\n"
+ "{\n"
+ "authenticate : 1 ,\n"
+ "user : username ,\n"
+ "nonce : n.nonce ,\n"
+ "key : this.__pwHash( n.nonce, username, pass )}\n"
+ ");\n"
+ "return a.ok;}\n"
+ "\n"
+ "DB.prototype.createCollection = function(name, opt) {\n"
+ "var options = opt || {};\n"
+ "var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };\n"
+ "var res = this._dbCommand(cmd);\n"
+ "return res;}\n"
+ "\n"
+ "DB.prototype.getProfilingLevel = function() {\n"
+ "var res = this._dbCommand( { profile: -1 } );\n"
+ "return res ? res.was : null;}\n"
+ "\n"
+ "DB.prototype.dropDatabase = function() {\n"
+ "if ( arguments.length )\n"
+ "throw \"dropDatabase doesn't take arguments\";\n"
+ "return this._dbCommand( { dropDatabase: 1 } );}\n"
+ "DB.prototype.shutdownServer = function() {\n"
+ "if( \"admin\" != this._name ){\n"
+ "return \"shutdown command only works with the admin database; try 'use admin'\";}\n"
+ "try {\n"
+ "var res = this._dbCommand(\"shutdown\");\n"
+ "if( res )\n"
+ "throw \"shutdownServer failed: \" + res.errmsg;\n"
+ "throw \"shutdownServer failed\";}\n"
+ "catch ( e ){\n"
+ "assert( tojson( e ).indexOf( \"error doing query: failed\" ) >= 0 , \"unexpected error: \" + tojson( e ) );\n"
+ "print( \"server should be down...\" );}}\n"
+ "\n"
+ "DB.prototype.cloneDatabase = function(from) {\n"
+ "assert( isString(from) && from.length );\n"
+ "return this._dbCommand( { clone: from } );}\n"
+ "\n"
+ "DB.prototype.cloneCollection = function(from, collection, query) {\n"
+ "assert( isString(from) && from.length );\n"
+ "assert( isString(collection) && collection.length );\n"
+ "collection = this._name + \".\" + collection;\n"
+ "query = query || {};\n"
+ "return this._dbCommand( { cloneCollection:collection, from:from, query:query } );}\n"
+ "\n"
+ "DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {\n"
+ "assert( isString(fromdb) && fromdb.length );\n"
+ "assert( isString(todb) && todb.length );\n"
+ "fromhost = fromhost || \"\";\n"
+ "if ( username && password ) {\n"
+ "var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );\n"
+ "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );\n"
+ "} else {\n"
+ "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );}}\n"
+ "\n"
+ "DB.prototype.repairDatabase = function() {\n"
+ "return this._dbCommand( { repairDatabase: 1 } );}\n"
+ "DB.prototype.help = function() {\n"
+ "print(\"DB methods:\");\n"
+ "print(\"\\tdb.addUser(username, password[, readOnly=false])\");\n"
+ "print(\"\\tdb.auth(username, password)\");\n"
+ "print(\"\\tdb.cloneDatabase(fromhost)\");\n"
+ "print(\"\\tdb.commandHelp(name) returns the help for the command\");\n"
+ "print(\"\\tdb.copyDatabase(fromdb, todb, fromhost)\");\n"
+ "print(\"\\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )\");\n"
+ "print(\"\\tdb.currentOp() displays the current operation in the db\");\n"
+ "print(\"\\tdb.dropDatabase()\");\n"
+ "print(\"\\tdb.eval(func, args) run code server-side\");\n"
+ "print(\"\\tdb.getCollection(cname) same as db['cname'] or db.cname\");\n"
+ "print(\"\\tdb.getCollectionNames()\");\n"
+ "print(\"\\tdb.getLastError() - just returns the err msg string\");\n"
+ "print(\"\\tdb.getLastErrorObj() - return full status object\");\n"
+ "print(\"\\tdb.getMongo() get the server connection object\");\n"
+ "print(\"\\tdb.getMongo().setSlaveOk() allow this connection to read from the nonmaster member of a replica pair\");\n"
+ "print(\"\\tdb.getName()\");\n"
+ "print(\"\\tdb.getPrevError()\");\n"
+ "print(\"\\tdb.getProfilingLevel()\");\n"
+ "print(\"\\tdb.getReplicationInfo()\");\n"
+ "print(\"\\tdb.getSisterDB(name) get the db at the same server as this one\");\n"
+ "print(\"\\tdb.isMaster() check replica primary status\");\n"
+ "print(\"\\tdb.killOp(opid) kills the current operation in the db\");\n"
+ "print(\"\\tdb.listCommands() lists all the db commands\");\n"
+ "print(\"\\tdb.printCollectionStats()\");\n"
+ "print(\"\\tdb.printReplicationInfo()\");\n"
+ "print(\"\\tdb.printSlaveReplicationInfo()\");\n"
+ "print(\"\\tdb.printShardingStatus()\");\n"
+ "print(\"\\tdb.removeUser(username)\");\n"
+ "print(\"\\tdb.repairDatabase()\");\n"
+ "print(\"\\tdb.resetError()\");\n"
+ "print(\"\\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }\");\n"
+ "print(\"\\tdb.serverStatus()\");\n"
+ "print(\"\\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all\");\n"
+ "print(\"\\tdb.shutdownServer()\");\n"
+ "print(\"\\tdb.stats()\");\n"
+ "print(\"\\tdb.version() current version of the server\");\n"
+ "print(\"\\tdb.getMongo().setSlaveOk() allow queries on a replication slave server\");\n"
+ "return __magicNoPrint;}\n"
+ "DB.prototype.printCollectionStats = function(){\n"
+ "var mydb = this;\n"
+ "this.getCollectionNames().forEach(\n"
+ "function(z){\n"
+ "print( z );\n"
+ "printjson( mydb.getCollection(z).stats() );\n"
+ "print( \"---\" );}\n"
+ ");}\n"
+ "\n"
+ "DB.prototype.setProfilingLevel = function(level,slowms) {\n"
+ "if (level < 0 || level > 2) {\n"
+ "throw { dbSetProfilingException : \"input level \" + level + \" is out of range [0..2]\" };}\n"
+ "var cmd = { profile: level };\n"
+ "if ( slowms )\n"
+ "cmd[\"slowms\"] = slowms;\n"
+ "return this._dbCommand( cmd );}\n"
+ "\n"
+ "DB.prototype.eval = function(jsfunction) {\n"
+ "var cmd = { $eval : jsfunction };\n"
+ "if ( arguments.length > 1 ) {\n"
+ "cmd.args = argumentsToArray( arguments ).slice(1);}\n"
+ "var res = this._dbCommand( cmd );\n"
+ "if (!res.ok)\n"
+ "throw tojson( res );\n"
+ "return res.retval;}\n"
+ "DB.prototype.dbEval = DB.prototype.eval;\n"
+ "\n"
+ "DB.prototype.groupeval = function(parmsObj) {\n"
+ "var groupFunction = function() {\n"
+ "var parms = args[0];\n"
+ "var c = db[parms.ns].find(parms.cond||{});\n"
+ "var map = new Map();\n"
+ "var pks = parms.key ? Object.keySet( parms.key ) : null;\n"
+ "var pkl = pks ? pks.length : 0;\n"
+ "var key = {};\n"
+ "while( c.hasNext() ) {\n"
+ "var obj = c.next();\n"
+ "if ( pks ) {\n"
+ "for( var i=0; i<pkl; i++ ){\n"
+ "var k = pks[i];\n"
+ "key[k] = obj[k];}}\n"
+ "else {\n"
+ "key = parms.$keyf(obj);}\n"
+ "var aggObj = map.get(key);\n"
+ "if( aggObj == null ) {\n"
+ "var newObj = Object.extend({}, key);\n"
+ "aggObj = Object.extend(newObj, parms.initial)\n"
+ "map.put( key , aggObj );}\n"
+ "parms.$reduce(obj, aggObj);}\n"
+ "return map.values();}\n"
+ "return this.eval(groupFunction, this._groupFixParms( parmsObj ));}\n"
+ "DB.prototype.groupcmd = function( parmsObj ){\n"
+ "var ret = this.runCommand( { \"group\" : this._groupFixParms( parmsObj ) } );\n"
+ "if ( ! ret.ok ){\n"
+ "throw \"group command failed: \" + tojson( ret );}\n"
+ "return ret.retval;}\n"
+ "DB.prototype.group = DB.prototype.groupcmd;\n"
+ "DB.prototype._groupFixParms = function( parmsObj ){\n"
+ "var parms = Object.extend({}, parmsObj);\n"
+ "if( parms.reduce ) {\n"
+ "parms.$reduce = parms.reduce;\n"
+ "delete parms.reduce;}\n"
+ "if( parms.keyf ) {\n"
+ "parms.$keyf = parms.keyf;\n"
+ "delete parms.keyf;}\n"
+ "return parms;}\n"
+ "DB.prototype.resetError = function(){\n"
+ "return this.runCommand( { reseterror : 1 } );}\n"
+ "DB.prototype.forceError = function(){\n"
+ "return this.runCommand( { forceerror : 1 } );}\n"
+ "DB.prototype.getLastError = function( w , wtimeout ){\n"
+ "var res = this.getLastErrorObj( w , wtimeout );\n"
+ "if ( ! res.ok )\n"
+ "throw \"getlasterror failed: \" + tojson( res );\n"
+ "return res.err;}\n"
+ "DB.prototype.getLastErrorObj = function( w , wtimeout ){\n"
+ "var cmd = { getlasterror : 1 };\n"
+ "if ( w ){\n"
+ "cmd.w = w;\n"
+ "if ( wtimeout )\n"
+ "cmd.wtimeout = wtimeout;}\n"
+ "var res = this.runCommand( cmd );\n"
+ "if ( ! res.ok )\n"
+ "throw \"getlasterror failed: \" + tojson( res );\n"
+ "return res;}\n"
+ "DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;\n"
+ "/* Return the last error which has occurred, even if not the very last error.\n"
+ "Returns:\n"
+ "{ err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }\n"
+ "result.err will be null if no error has occurred.\n"
+ "*/\n"
+ "DB.prototype.getPrevError = function(){\n"
+ "return this.runCommand( { getpreverror : 1 } );}\n"
+ "DB.prototype.getCollectionNames = function(){\n"
+ "var all = [];\n"
+ "var nsLength = this._name.length + 1;\n"
+ "var c = this.getCollection( \"system.namespaces\" ).find();\n"
+ "while ( c.hasNext() ){\n"
+ "var name = c.next().name;\n"
+ "if ( name.indexOf( \"$\" ) >= 0 && name.indexOf( \".oplog.$\" ) < 0 )\n"
+ "continue;\n"
+ "all.push( name.substring( nsLength ) );}\n"
+ "return all.sort();}\n"
+ "DB.prototype.tojson = function(){\n"
+ "return this._name;}\n"
+ "DB.prototype.toString = function(){\n"
+ "return this._name;}\n"
+ "DB.prototype.isMaster = function () { return this.runCommand(\"isMaster\"); }\n"
+ "DB.prototype.currentOp = function(){\n"
+ "return db.$cmd.sys.inprog.findOne();}\n"
+ "DB.prototype.currentOP = DB.prototype.currentOp;\n"
+ "DB.prototype.killOp = function(op) {\n"
+ "if( !op )\n"
+ "throw \"no opNum to kill specified\";\n"
+ "return db.$cmd.sys.killop.findOne({'op':op});}\n"
+ "DB.prototype.killOP = DB.prototype.killOp;\n"
// tsToSeconds: normalize an oplog timestamp to seconds.
// Two encodings are handled: an object {t, i} where t is in milliseconds
// (hence the /1000), and a packed numeric form where the seconds live in
// the high 32 bits (hence the divide by 4294967296 == 2^32).
+ "DB.tsToSeconds = function(x){\n"
+ "if ( x.t && x.i )\n"
+ "return x.t / 1000;\n"
+ "return x / 4294967296;}\n"
+ "\n"
+ "DB.prototype.getReplicationInfo = function() {\n"
+ "var db = this.getSisterDB(\"local\");\n"
+ "var result = { };\n"
+ "var ol = db.system.namespaces.findOne({name:\"local.oplog.$main\"});\n"
+ "if( ol && ol.options ) {\n"
+ "result.logSizeMB = ol.options.size / 1000 / 1000;\n"
+ "} else {\n"
+ "result.errmsg = \"local.oplog.$main, or its options, not found in system.namespaces collection (not --master?)\";\n"
+ "return result;}\n"
+ "var firstc = db.oplog.$main.find().sort({$natural:1}).limit(1);\n"
+ "var lastc = db.oplog.$main.find().sort({$natural:-1}).limit(1);\n"
+ "if( !firstc.hasNext() || !lastc.hasNext() ) {\n"
+ "result.errmsg = \"objects not found in local.oplog.$main -- is this a new and empty db instance?\";\n"
+ "result.oplogMainRowCount = db.oplog.$main.count();\n"
+ "return result;}\n"
+ "var first = firstc.next();\n"
+ "var last = lastc.next();\n"
+ "{\n"
+ "var tfirst = first.ts;\n"
+ "var tlast = last.ts;\n"
+ "if( tfirst && tlast ) {\n"
+ "tfirst = DB.tsToSeconds( tfirst );\n"
+ "tlast = DB.tsToSeconds( tlast );\n"
+ "result.timeDiff = tlast - tfirst;\n"
+ "result.timeDiffHours = Math.round(result.timeDiff / 36)/100;\n"
+ "result.tFirst = (new Date(tfirst*1000)).toString();\n"
+ "result.tLast = (new Date(tlast*1000)).toString();\n"
+ "result.now = Date();}\n"
+ "else {\n"
+ "result.errmsg = \"ts element not found in oplog objects\";}}\n"
+ "return result;}\n"
+ "DB.prototype.printReplicationInfo = function() {\n"
+ "var result = this.getReplicationInfo();\n"
+ "if( result.errmsg ) {\n"
+ "print(tojson(result));\n"
+ "return;}\n"
+ "print(\"configured oplog size: \" + result.logSizeMB + \"MB\");\n"
+ "print(\"log length start to end: \" + result.timeDiff + \"secs (\" + result.timeDiffHours + \"hrs)\");\n"
+ "print(\"oplog first event time: \" + result.tFirst);\n"
+ "print(\"oplog last event time: \" + result.tLast);\n"
+ "print(\"now: \" + result.now);}\n"
+ "DB.prototype.printSlaveReplicationInfo = function() {\n"
+ "function g(x) {\n"
+ "assert( x , \"how could this be null (printSlaveReplicationInfo gx)\" )\n"
+ "print(\"source: \" + x.host);\n"
+ "if ( x.syncedTo ){\n"
+ "var st = new Date( DB.tsToSeconds( x.syncedTo ) * 1000 );\n"
+ "var now = new Date();\n"
+ "print(\"\\t syncedTo: \" + st.toString() );\n"
+ "var ago = (now-st)/1000;\n"
+ "var hrs = Math.round(ago/36)/100;\n"
+ "print(\"\\t\\t = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");}\n"
+ "else {\n"
+ "print( \"\\t doing initial sync\" );}}\n"
+ "var L = this.getSisterDB(\"local\");\n"
+ "if( L.sources.count() == 0 ) {\n"
+ "print(\"local.sources is empty; is this db a --slave?\");\n"
+ "return;}\n"
+ "L.sources.find().forEach(g);}\n"
+ "DB.prototype.serverBuildInfo = function(){\n"
+ "return this._adminCommand( \"buildinfo\" );}\n"
+ "DB.prototype.serverStatus = function(){\n"
+ "return this._adminCommand( \"serverStatus\" );}\n"
+ "DB.prototype.serverCmdLineOpts = function(){\n"
+ "return this._adminCommand( \"getCmdLineOpts\" );}\n"
+ "DB.prototype.version = function(){\n"
+ "return this.serverBuildInfo().version;}\n"
// listCommands: fetch the server's command list via the listCommands
// command and pretty-print each entry: lock type, adminOnly/slaveOk
// flags, and the indented help text.
+ "DB.prototype.listCommands = function(){\n"
+ "var x = this.runCommand( \"listCommands\" );\n"
+ "for ( var name in x.commands ){\n"
+ "var c = x.commands[name];\n"
+ "var s = name + \": \";\n"
+ "switch ( c.lockType ){\n"
+ "case -1: s += \"read-lock\"; break;\n"
+ "case 0: s += \"no-lock\"; break;\n"
+ "case 1: s += \"write-lock\"; break;\n"
+ "default: s += c.lockType;}\n"
+ "if (c.adminOnly) s += \" adminOnly \";\n"
// BUG FIX: this line previously re-tested c.adminOnly, so " slaveOk " was
// appended for admin-only commands and never for actual slaveOk commands.
// Test the slaveOk flag the command reports instead.
+ "if (c.slaveOk) s += \" slaveOk \";\n"
+ "s += \"\\n \";\n"
+ "s += c.help.replace(/\\n/g, '\\n ');\n"
+ "s += \"\\n\";\n"
+ "print( s );}}\n"
+ "DB.prototype.printShardingStatus = function(){\n"
+ "printShardingStatus( this.getSisterDB( \"config\" ) );}\n"
+ "if ( typeof Mongo == \"undefined\" ){\n"
+ "Mongo = function( host ){\n"
+ "this.init( host );}}\n"
+ "if ( ! Mongo.prototype ){\n"
+ "throw \"Mongo.prototype not defined\";}\n"
+ "if ( ! Mongo.prototype.find )\n"
+ "Mongo.prototype.find = function( ns , query , fields , limit , skip ){ throw \"find not implemented\"; }\n"
+ "if ( ! Mongo.prototype.insert )\n"
+ "Mongo.prototype.insert = function( ns , obj ){ throw \"insert not implemented\"; }\n"
+ "if ( ! Mongo.prototype.remove )\n"
+ "Mongo.prototype.remove = function( ns , pattern ){ throw \"remove not implemented;\" }\n"
+ "if ( ! Mongo.prototype.update )\n"
+ "Mongo.prototype.update = function( ns , query , obj , upsert ){ throw \"update not implemented;\" }\n"
+ "if ( typeof mongoInject == \"function\" ){\n"
+ "mongoInject( Mongo.prototype );}\n"
+ "Mongo.prototype.setSlaveOk = function() {\n"
+ "this.slaveOk = true;}\n"
+ "Mongo.prototype.getDB = function( name ){\n"
+ "return new DB( this , name );}\n"
+ "Mongo.prototype.getDBs = function(){\n"
+ "var res = this.getDB( \"admin\" ).runCommand( { \"listDatabases\" : 1 } );\n"
+ "assert( res.ok == 1 , \"listDatabases failed:\" + tojson( res ) );\n"
+ "return res;}\n"
+ "Mongo.prototype.getDBNames = function(){\n"
+ "return this.getDBs().databases.map(\n"
+ "function(z){\n"
+ "return z.name;}\n"
+ ");}\n"
+ "Mongo.prototype.getCollection = function(ns){\n"
+ "var idx = ns.indexOf( \".\" );\n"
+ "if ( idx < 0 )\n"
+ "throw \"need . in ns\";\n"
+ "var db = ns.substring( 0 , idx );\n"
+ "var c = ns.substring( idx + 1 );\n"
+ "return this.getDB( db ).getCollection( c );}\n"
+ "Mongo.prototype.toString = function(){\n"
+ "return \"connection to \" + this.host;}\n"
+ "Mongo.prototype.tojson = Mongo.prototype.toString;\n"
+ "connect = function( url , user , pass ){\n"
+ "chatty( \"connecting to: \" + url )\n"
+ "if ( user && ! pass )\n"
+ "throw \"you specified a user and not a password. either you need a password, or you're using the old connect api\";\n"
+ "var idx = url.lastIndexOf( \"/\" );\n"
+ "var db;\n"
+ "if ( idx < 0 )\n"
+ "db = new Mongo().getDB( url );\n"
+ "else\n"
+ "db = new Mongo( url.substring( 0 , idx ) ).getDB( url.substring( idx + 1 ) );\n"
+ "if ( user && pass ){\n"
+ "if ( ! db.auth( user , pass ) ){\n"
+ "throw \"couldn't login\";}}\n"
+ "return db;}\n"
+ "MR = {};\n"
+ "MR.init = function(){\n"
+ "$max = 0;\n"
+ "$arr = [];\n"
+ "emit = MR.emit;\n"
+ "$numEmits = 0;\n"
+ "$numReduces = 0;\n"
+ "$numReducesToDB = 0;\n"
+ "gc();}\n"
+ "MR.cleanup = function(){\n"
+ "MR.init();\n"
+ "gc();}\n"
+ "MR.emit = function(k,v){\n"
+ "$numEmits++;\n"
+ "var num = nativeHelper.apply( get_num_ , [ k ] );\n"
+ "var data = $arr[num];\n"
+ "if ( ! data ){\n"
+ "data = { key : k , values : new Array(1000) , count : 0 };\n"
+ "$arr[num] = data;}\n"
+ "data.values[data.count++] = v;\n"
+ "$max = Math.max( $max , data.count );}\n"
+ "MR.doReduce = function( useDB ){\n"
+ "$numReduces++;\n"
+ "if ( useDB )\n"
+ "$numReducesToDB++;\n"
+ "$max = 0;\n"
+ "for ( var i=0; i<$arr.length; i++){\n"
+ "var data = $arr[i];\n"
+ "if ( ! data )\n"
+ "continue;\n"
+ "if ( useDB ){\n"
+ "var x = tempcoll.findOne( { _id : data.key } );\n"
+ "if ( x ){\n"
+ "data.values[data.count++] = x.value;}}\n"
+ "var r = $reduce( data.key , data.values.slice( 0 , data.count ) );\n"
+ "if ( r && r.length && r[0] ){\n"
+ "data.values = r;\n"
+ "data.count = r.length;}\n"
+ "else{\n"
+ "data.values[0] = r;\n"
+ "data.count = 1;}\n"
+ "$max = Math.max( $max , data.count );\n"
+ "if ( useDB ){\n"
+ "if ( data.count == 1 ){\n"
+ "tempcoll.save( { _id : data.key , value : data.values[0] } );}\n"
+ "else {\n"
+ "tempcoll.save( { _id : data.key , value : data.values.slice( 0 , data.count ) } );}}}}\n"
+ "MR.check = function(){\n"
+ "if ( $max < 2000 && $arr.length < 1000 ){\n"
+ "return 0;}\n"
+ "MR.doReduce();\n"
+ "if ( $max < 2000 && $arr.length < 1000 ){\n"
+ "return 1;}\n"
+ "MR.doReduce( true );\n"
+ "$arr = [];\n"
+ "$max = 0;\n"
+ "reset_num();\n"
+ "gc();\n"
+ "return 2;}\n"
+ "MR.finalize = function(){\n"
+ "tempcoll.find().forEach(\n"
+ "function(z){\n"
+ "z.value = $finalize( z._id , z.value );\n"
+ "tempcoll.save( z );}\n"
+ ");}\n"
+ "if ( typeof DBQuery == \"undefined\" ){\n"
+ "DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize ){\n"
+ "this._mongo = mongo;\n"
+ "this._db = db;\n"
+ "this._collection = collection;\n"
+ "this._ns = ns;\n"
+ "this._query = query || {};\n"
+ "this._fields = fields;\n"
+ "this._limit = limit || 0;\n"
+ "this._skip = skip || 0;\n"
+ "this._batchSize = batchSize || 0;\n"
+ "this._cursor = null;\n"
+ "this._numReturned = 0;\n"
+ "this._special = false;\n"
+ "this._prettyShell = false;}\n"
+ "print( \"DBQuery probably won't have array access \" );}\n"
+ "DBQuery.prototype.help = function () {\n"
+ "print(\"find() modifiers\")\n"
+ "print(\"\\t.sort( {...} )\")\n"
+ "print(\"\\t.limit( n )\")\n"
+ "print(\"\\t.skip( n )\")\n"
+ "print(\"\\t.count() - total # of objects matching query, ignores skip,limit\")\n"
+ "print(\"\\t.size() - total # of objects cursor would return, honors skip,limit\")\n"
+ "print(\"\\t.explain([verbose])\")\n"
+ "print(\"\\t.hint(...)\")\n"
+ "print(\"\\t.showDiskLoc() - adds a $diskLoc field to each returned object\")\n"
+ "print(\"\\nCursor methods\");\n"
+ "print(\"\\t.forEach( func )\")\n"
+ "print(\"\\t.print() - output to console in full pretty format\")\n"
+ "print(\"\\t.map( func )\")\n"
+ "print(\"\\t.hasNext()\")\n"
+ "print(\"\\t.next()\")}\n"
+ "DBQuery.prototype.clone = function(){\n"
+ "var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,\n"
+ "this._query , this._fields ,\n"
+ "this._limit , this._skip , this._batchSize );\n"
+ "q._special = this._special;\n"
+ "return q;}\n"
+ "DBQuery.prototype._ensureSpecial = function(){\n"
+ "if ( this._special )\n"
+ "return;\n"
+ "var n = { query : this._query };\n"
+ "this._query = n;\n"
+ "this._special = true;}\n"
+ "DBQuery.prototype._checkModify = function(){\n"
+ "if ( this._cursor )\n"
+ "throw \"query already executed\";}\n"
+ "DBQuery.prototype._exec = function(){\n"
+ "if ( ! this._cursor ){\n"
+ "assert.eq( 0 , this._numReturned );\n"
+ "this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize );\n"
+ "this._cursorSeen = 0;}\n"
+ "return this._cursor;}\n"
+ "DBQuery.prototype.limit = function( limit ){\n"
+ "this._checkModify();\n"
+ "this._limit = limit;\n"
+ "return this;}\n"
+ "DBQuery.prototype.batchSize = function( batchSize ){\n"
+ "this._checkModify();\n"
+ "this._batchSize = batchSize;\n"
+ "return this;}\n"
+ "DBQuery.prototype.skip = function( skip ){\n"
+ "this._checkModify();\n"
+ "this._skip = skip;\n"
+ "return this;}\n"
+ "DBQuery.prototype.hasNext = function(){\n"
+ "this._exec();\n"
+ "if ( this._limit > 0 && this._cursorSeen >= this._limit )\n"
+ "return false;\n"
+ "var o = this._cursor.hasNext();\n"
+ "return o;}\n"
+ "DBQuery.prototype.next = function(){\n"
+ "this._exec();\n"
+ "var o = this._cursor.hasNext();\n"
+ "if ( o )\n"
+ "this._cursorSeen++;\n"
+ "else\n"
+ "throw \"error hasNext: \" + o;\n"
+ "var ret = this._cursor.next();\n"
+ "if ( ret.$err && this._numReturned == 0 && ! this.hasNext() )\n"
+ "throw \"error: \" + tojson( ret );\n"
+ "this._numReturned++;\n"
+ "return ret;}\n"
+ "DBQuery.prototype.objsLeftInBatch = function(){\n"
+ "this._exec();\n"
+ "var ret = this._cursor.objsLeftInBatch();\n"
+ "if ( ret.$err )\n"
+ "throw \"error: \" + tojson( ret );\n"
+ "return ret;}\n"
+ "DBQuery.prototype.toArray = function(){\n"
+ "if ( this._arr )\n"
+ "return this._arr;\n"
+ "var a = [];\n"
+ "while ( this.hasNext() )\n"
+ "a.push( this.next() );\n"
+ "this._arr = a;\n"
+ "return a;}\n"
+ "DBQuery.prototype.count = function( applySkipLimit ){\n"
+ "var cmd = { count: this._collection.getName() };\n"
+ "if ( this._query ){\n"
+ "if ( this._special )\n"
+ "cmd.query = this._query.query;\n"
+ "else\n"
+ "cmd.query = this._query;}\n"
+ "cmd.fields = this._fields || {};\n"
+ "if ( applySkipLimit ){\n"
+ "if ( this._limit )\n"
+ "cmd.limit = this._limit;\n"
+ "if ( this._skip )\n"
+ "cmd.skip = this._skip;}\n"
+ "var res = this._db.runCommand( cmd );\n"
+ "if( res && res.n != null ) return res.n;\n"
+ "throw \"count failed: \" + tojson( res );}\n"
+ "DBQuery.prototype.size = function(){\n"
+ "return this.count( true );}\n"
+ "DBQuery.prototype.countReturn = function(){\n"
+ "var c = this.count();\n"
+ "if ( this._skip )\n"
+ "c = c - this._skip;\n"
+ "if ( this._limit > 0 && this._limit < c )\n"
+ "return this._limit;\n"
+ "return c;}\n"
+ "\n"
+ "DBQuery.prototype.itcount = function(){\n"
+ "var num = 0;\n"
+ "while ( this.hasNext() ){\n"
+ "num++;\n"
+ "this.next();}\n"
+ "return num;}\n"
+ "DBQuery.prototype.length = function(){\n"
+ "return this.toArray().length;}\n"
+ "DBQuery.prototype._addSpecial = function( name , value ){\n"
+ "this._ensureSpecial();\n"
+ "this._query[name] = value;\n"
+ "return this;}\n"
+ "DBQuery.prototype.sort = function( sortBy ){\n"
+ "return this._addSpecial( \"orderby\" , sortBy );}\n"
+ "DBQuery.prototype.hint = function( hint ){\n"
+ "return this._addSpecial( \"$hint\" , hint );}\n"
+ "DBQuery.prototype.min = function( min ) {\n"
+ "return this._addSpecial( \"$min\" , min );}\n"
+ "DBQuery.prototype.max = function( max ) {\n"
+ "return this._addSpecial( \"$max\" , max );}\n"
+ "DBQuery.prototype.showDiskLoc = function() {\n"
+ "return this._addSpecial( \"$showDiskLoc\" , true);}\n"
+ "DBQuery.prototype.forEach = function( func ){\n"
+ "while ( this.hasNext() )\n"
+ "func( this.next() );}\n"
+ "DBQuery.prototype.map = function( func ){\n"
+ "var a = [];\n"
+ "while ( this.hasNext() )\n"
+ "a.push( func( this.next() ) );\n"
+ "return a;}\n"
+ "DBQuery.prototype.arrayAccess = function( idx ){\n"
+ "return this.toArray()[idx];}\n"
// explain: clone the query, set $explain, and return the server's plan
// document. With verbose falsy, the bulky allPlans/oldPlan fields are
// stripped from the result.
// NOTE(review): the limit is negated before execution — presumably to make
// it a hard limit (single batch) so explain runs exactly once; confirm
// against the server's query-option semantics.
+ "DBQuery.prototype.explain = function (verbose) {\n"
+ "/* verbose=true --> include allPlans, oldPlan fields */\n"
+ "var n = this.clone();\n"
+ "n._ensureSpecial();\n"
+ "n._query.$explain = true;\n"
+ "n._limit = n._limit * -1;\n"
+ "var e = n.next();\n"
+ "if (!verbose) {\n"
+ "delete e.allPlans;\n"
+ "delete e.oldPlan;}\n"
+ "return e;}\n"
+ "DBQuery.prototype.snapshot = function(){\n"
+ "this._ensureSpecial();\n"
+ "this._query.$snapshot = true;\n"
+ "return this;}\n"
+ "DBQuery.prototype.pretty = function(){\n"
+ "this._prettyShell = true;\n"
+ "return this;}\n"
+ "DBQuery.prototype.shellPrint = function(){\n"
+ "try {\n"
+ "var n = 0;\n"
+ "while ( this.hasNext() && n < DBQuery.shellBatchSize ){\n"
+ "var s = this._prettyShell ? tojson( this.next() ) : tojson( this.next() , \"\" , true );\n"
+ "print( s );\n"
+ "n++;}\n"
+ "if ( this.hasNext() ){\n"
+ "print( \"has more\" );\n"
+ "___it___ = this;}\n"
+ "else {\n"
+ "___it___ = null;}}\n"
+ "catch ( e ){\n"
+ "print( e );}}\n"
// toString: render the cursor as "DBQuery: <ns> -> <query>".
// BUG FIX: previously read this.query, which is never set on DBQuery
// (the constructor and every other method use this._query — see the
// constructor, _ensureSpecial, and count), so toString printed
// "-> undefined". Use the real field.
+ "DBQuery.prototype.toString = function(){\n"
+ "return \"DBQuery: \" + this._ns + \" -> \" + tojson( this._query );}\n"
+ "DBQuery.shellBatchSize = 20;\n"
+ "// or db[\"colName\"]\n"
+ "if ( ( typeof DBCollection ) == \"undefined\" ){\n"
+ "DBCollection = function( mongo , db , shortName , fullName ){\n"
+ "this._mongo = mongo;\n"
+ "this._db = db;\n"
+ "this._shortName = shortName;\n"
+ "this._fullName = fullName;\n"
+ "this.verify();}}\n"
+ "DBCollection.prototype.verify = function(){\n"
+ "assert( this._fullName , \"no fullName\" );\n"
+ "assert( this._shortName , \"no shortName\" );\n"
+ "assert( this._db , \"no db\" );\n"
+ "assert.eq( this._fullName , this._db._name + \".\" + this._shortName , \"name mismatch\" );\n"
+ "assert( this._mongo , \"no mongo in DBCollection\" );}\n"
+ "DBCollection.prototype.getName = function(){\n"
+ "return this._shortName;}\n"
+ "DBCollection.prototype.help = function () {\n"
+ "var shortName = this.getName();\n"
+ "print(\"DBCollection help\");\n"
+ "print(\"\\tdb.\" + shortName + \".find().help() - show DBCursor help\");\n"
+ "print(\"\\tdb.\" + shortName + \".count()\");\n"
+ "print(\"\\tdb.\" + shortName + \".dataSize()\");\n"
+ "print(\"\\tdb.\" + shortName + \".distinct( key ) - eg. db.\" + shortName + \".distinct( 'x' )\");\n"
+ "print(\"\\tdb.\" + shortName + \".drop() drop the collection\");\n"
+ "print(\"\\tdb.\" + shortName + \".dropIndex(name)\");\n"
+ "print(\"\\tdb.\" + shortName + \".dropIndexes()\");\n"
+ "print(\"\\tdb.\" + shortName + \".ensureIndex(keypattern,options) - options should be an object with these possible fields: name, unique, dropDups\");\n"
+ "print(\"\\tdb.\" + shortName + \".reIndex()\");\n"
+ "print(\"\\tdb.\" + shortName + \".find( [query] , [fields]) - first parameter is an optional query filter. second parameter is optional set of fields to return.\");\n"
+ "print(\"\\t e.g. db.\" + shortName + \".find( { x : 77 } , { name : 1 , x : 1 } )\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).count()\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).limit(n)\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).skip(n)\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).sort(...)\");\n"
+ "print(\"\\tdb.\" + shortName + \".findOne([query])\");\n"
+ "print(\"\\tdb.\" + shortName + \".findAndModify( { update : ... , remove : bool [, query: {}, sort: {}, 'new': false] } )\");\n"
+ "print(\"\\tdb.\" + shortName + \".getDB() get DB object associated with collection\");\n"
+ "print(\"\\tdb.\" + shortName + \".getIndexes()\");\n"
+ "print(\"\\tdb.\" + shortName + \".group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )\");\n"
+ "print(\"\\tdb.\" + shortName + \".mapReduce( mapFunction , reduceFunction , <optional params> )\");\n"
+ "print(\"\\tdb.\" + shortName + \".remove(query)\");\n"
+ "print(\"\\tdb.\" + shortName + \".renameCollection( newName , <dropTarget> ) renames the collection.\");\n"
+ "print(\"\\tdb.\" + shortName + \".runCommand( name , <options> ) runs a db command with the given name where the first param is the collection name\");\n"
+ "print(\"\\tdb.\" + shortName + \".save(obj)\");\n"
+ "print(\"\\tdb.\" + shortName + \".stats()\");\n"
+ "print(\"\\tdb.\" + shortName + \".storageSize() - includes free space allocated to this collection\");\n"
+ "print(\"\\tdb.\" + shortName + \".totalIndexSize() - size in bytes of all the indexes\");\n"
+ "print(\"\\tdb.\" + shortName + \".totalSize() - storage allocated for all data and indexes\");\n"
+ "print(\"\\tdb.\" + shortName + \".update(query, object[, upsert_bool, multi_bool])\");\n"
+ "print(\"\\tdb.\" + shortName + \".validate() - SLOW\");\n"
+ "print(\"\\tdb.\" + shortName + \".getShardVersion() - only for use with sharding\");\n"
+ "return __magicNoPrint;}\n"
+ "DBCollection.prototype.getFullName = function(){\n"
+ "return this._fullName;}\n"
+ "DBCollection.prototype.getDB = function(){\n"
+ "return this._db;}\n"
+ "DBCollection.prototype._dbCommand = function( cmd , params ){\n"
+ "if ( typeof( cmd ) == \"object\" )\n"
+ "return this._db._dbCommand( cmd );\n"
+ "var c = {};\n"
+ "c[cmd] = this.getName();\n"
+ "if ( params )\n"
+ "Object.extend( c , params );\n"
+ "return this._db._dbCommand( c );}\n"
+ "DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;\n"
+ "DBCollection.prototype._massageObject = function( q ){\n"
+ "if ( ! q )\n"
+ "return {};\n"
+ "var type = typeof q;\n"
+ "if ( type == \"function\" )\n"
+ "return { $where : q };\n"
+ "if ( q.isObjectId )\n"
+ "return { _id : q };\n"
+ "if ( type == \"object\" )\n"
+ "return q;\n"
+ "if ( type == \"string\" ){\n"
+ "if ( q.length == 24 )\n"
+ "return { _id : q };\n"
+ "return { $where : q };}\n"
+ "throw \"don't know how to massage : \" + type;}\n"
+ "DBCollection.prototype._validateObject = function( o ){\n"
+ "if ( o._ensureSpecial && o._checkModify )\n"
+ "throw \"can't save a DBQuery object\";}\n"
+ "DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
+ "DBCollection.prototype._validateForStorage = function( o ){\n"
+ "this._validateObject( o );\n"
+ "for ( var k in o ){\n"
+ "if ( k.indexOf( \".\" ) >= 0 ) {\n"
+ "throw \"can't have . in field names [\" + k + \"]\" ;}\n"
+ "if ( k.indexOf( \"$\" ) == 0 && ! DBCollection._allowedFields[k] ) {\n"
+ "throw \"field names cannot start with $ [\" + k + \"]\";}\n"
+ "if ( o[k] !== null && typeof( o[k] ) === \"object\" ) {\n"
+ "this._validateForStorage( o[k] );}}\n"
+ "};\n"
+ "DBCollection.prototype.find = function( query , fields , limit , skip ){\n"
+ "return new DBQuery( this._mongo , this._db , this ,\n"
+ "this._fullName , this._massageObject( query ) , fields , limit , skip );}\n"
+ "DBCollection.prototype.findOne = function( query , fields ){\n"
+ "var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 , 0 );\n"
+ "if ( ! cursor.hasNext() )\n"
+ "return null;\n"
+ "var ret = cursor.next();\n"
+ "if ( cursor.hasNext() ) throw \"findOne has more than 1 result!\";\n"
+ "if ( ret.$err )\n"
+ "throw \"error \" + tojson( ret );\n"
+ "return ret;}\n"
+ "DBCollection.prototype.insert = function( obj , _allow_dot ){\n"
+ "if ( ! obj )\n"
+ "throw \"no object passed to insert!\";\n"
+ "if ( ! _allow_dot ) {\n"
+ "this._validateForStorage( obj );}\n"
+ "if ( typeof( obj._id ) == \"undefined\" ){\n"
+ "var tmp = obj;\n"
+ "obj = {_id: new ObjectId()};\n"
+ "for (var key in tmp){\n"
+ "obj[key] = tmp[key];}}\n"
+ "this._mongo.insert( this._fullName , obj );\n"
+ "this._lastID = obj._id;}\n"
+ "DBCollection.prototype.remove = function( t ){\n"
+ "this._mongo.remove( this._fullName , this._massageObject( t ) );}\n"
+ "DBCollection.prototype.update = function( query , obj , upsert , multi ){\n"
+ "assert( query , \"need a query\" );\n"
+ "assert( obj , \"need an object\" );\n"
+ "this._validateObject( obj );\n"
+ "this._mongo.update( this._fullName , query , obj , upsert ? true : false , multi ? true : false );}\n"
+ "DBCollection.prototype.save = function( obj ){\n"
+ "if ( obj == null || typeof( obj ) == \"undefined\" )\n"
+ "throw \"can't save a null\";\n"
+ "if ( typeof( obj._id ) == \"undefined\" ){\n"
+ "obj._id = new ObjectId();\n"
+ "return this.insert( obj );}\n"
+ "else {\n"
+ "return this.update( { _id : obj._id } , obj , true );}}\n"
+ "DBCollection.prototype._genIndexName = function( keys ){\n"
+ "var name = \"\";\n"
+ "for ( var k in keys ){\n"
+ "var v = keys[k];\n"
+ "if ( typeof v == \"function\" )\n"
+ "continue;\n"
+ "if ( name.length > 0 )\n"
+ "name += \"_\";\n"
+ "name += k + \"_\";\n"
+ "if ( typeof v == \"number\" )\n"
+ "name += v;}\n"
+ "return name;}\n"
+ "DBCollection.prototype._indexSpec = function( keys, options ) {\n"
+ "var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };\n"
+ "if ( ! options ){}\n"
+ "else if ( typeof ( options ) == \"string\" )\n"
+ "ret.name = options;\n"
+ "else if ( typeof ( options ) == \"boolean\" )\n"
+ "ret.unique = true;\n"
+ "else if ( typeof ( options ) == \"object\" ){\n"
+ "if ( options.length ){\n"
+ "var nb = 0;\n"
+ "for ( var i=0; i<options.length; i++ ){\n"
+ "if ( typeof ( options[i] ) == \"string\" )\n"
+ "ret.name = options[i];\n"
+ "else if ( typeof( options[i] ) == \"boolean\" ){\n"
+ "if ( options[i] ){\n"
+ "if ( nb == 0 )\n"
+ "ret.unique = true;\n"
+ "if ( nb == 1 )\n"
+ "ret.dropDups = true;}\n"
+ "nb++;}}}\n"
+ "else {\n"
+ "Object.extend( ret , options );}}\n"
+ "else {\n"
+ "throw \"can't handle: \" + typeof( options );}\n"
+ "/*\n"
+ "return ret;\n"
+ "var name;\n"
+ "var nTrue = 0;\n"
+ "if ( ! isObject( options ) ) {\n"
+ "options = [ options ];}\n"
+ "if ( options.length ){\n"
+ "for( var i = 0; i < options.length; ++i ) {\n"
+ "var o = options[ i ];\n"
+ "if ( isString( o ) ) {\n"
+ "ret.name = o;\n"
+ "} else if ( typeof( o ) == \"boolean\" ) {\n"
+ "if ( o ) {\n"
+ "++nTrue;}}}\n"
+ "if ( nTrue > 0 ) {\n"
+ "ret.unique = true;}\n"
+ "if ( nTrue > 1 ) {\n"
+ "ret.dropDups = true;}}\n"
+ "*/\n"
+ "return ret;}\n"
+ "DBCollection.prototype.createIndex = function( keys , options ){\n"
+ "var o = this._indexSpec( keys, options );\n"
+ "this._db.getCollection( \"system.indexes\" ).insert( o , true );}\n"
+ "DBCollection.prototype.ensureIndex = function( keys , options ){\n"
+ "var name = this._indexSpec( keys, options ).name;\n"
+ "this._indexCache = this._indexCache || {};\n"
+ "if ( this._indexCache[ name ] ){\n"
+ "return;}\n"
+ "this.createIndex( keys , options );\n"
+ "if ( this.getDB().getLastError() == \"\" ) {\n"
+ "this._indexCache[name] = true;}}\n"
+ "DBCollection.prototype.resetIndexCache = function(){\n"
+ "this._indexCache = {};}\n"
+ "DBCollection.prototype.reIndex = function() {\n"
+ "return this._db.runCommand({ reIndex: this.getName() });}\n"
+ "DBCollection.prototype.dropIndexes = function(){\n"
+ "this.resetIndexCache();\n"
+ "var res = this._db.runCommand( { deleteIndexes: this.getName(), index: \"*\" } );\n"
+ "assert( res , \"no result from dropIndex result\" );\n"
+ "if ( res.ok )\n"
+ "return res;\n"
+ "if ( res.errmsg.match( /not found/ ) )\n"
+ "return res;\n"
+ "throw \"error dropping indexes : \" + tojson( res );}\n"
+ "DBCollection.prototype.drop = function(){\n"
+ "this.resetIndexCache();\n"
+ "var ret = this._db.runCommand( { drop: this.getName() } );\n"
+ "if ( ! ret.ok ){\n"
+ "if ( ret.errmsg == \"ns not found\" )\n"
+ "return false;\n"
+ "throw \"drop failed: \" + tojson( ret );}\n"
+ "return true;}\n"
+ "DBCollection.prototype.findAndModify = function(args){\n"
+ "var cmd = { findandmodify: this.getName() };\n"
+ "for (var key in args){\n"
+ "cmd[key] = args[key];}\n"
+ "var ret = this._db.runCommand( cmd );\n"
+ "if ( ! ret.ok ){\n"
+ "if (ret.errmsg == \"No matching object found\"){\n"
+ "return null;}\n"
+ "throw \"findAndModifyFailed failed: \" + tojson( ret.errmsg );}\n"
+ "return ret.value;}\n"
+ "DBCollection.prototype.renameCollection = function( newName , dropTarget ){\n"
+ "return this._db._adminCommand( { renameCollection : this._fullName ,\n"
+ "to : this._db._name + \".\" + newName ,\n"
+ "dropTarget : dropTarget } )}\n"
+ "DBCollection.prototype.validate = function() {\n"
+ "var res = this._db.runCommand( { validate: this.getName() } );\n"
+ "res.valid = false;\n"
+ "var raw = res.result || res.raw;\n"
+ "if ( raw ){\n"
+ "var str = \"-\" + tojson( raw );\n"
+ "res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );\n"
+ "var p = /lastExtentSize:(\\d+)/;\n"
+ "var r = p.exec( str );\n"
+ "if ( r ){\n"
+ "res.lastExtentSize = Number( r[1] );}}\n"
+ "return res;}\n"
+ "DBCollection.prototype.getShardVersion = function(){\n"
+ "return this._db._adminCommand( { getShardVersion : this._fullName } );}\n"
+ "DBCollection.prototype.getIndexes = function(){\n"
+ "return this.getDB().getCollection( \"system.indexes\" ).find( { ns : this.getFullName() } ).toArray();}\n"
+ "DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;\n"
+ "DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;\n"
+ "DBCollection.prototype.getIndexKeys = function(){\n"
+ "return this.getIndexes().map(\n"
+ "function(i){\n"
+ "return i.key;}\n"
+ ");}\n"
+ "DBCollection.prototype.count = function( x ){\n"
+ "return this.find( x ).count();}\n"
+ "\n"
+ "DBCollection.prototype.clean = function() {\n"
+ "return this._dbCommand( { clean: this.getName() } );}\n"
+ "\n"
+ "DBCollection.prototype.dropIndex = function(index) {\n"
+ "assert(index , \"need to specify index to dropIndex\" );\n"
+ "if ( ! isString( index ) && isObject( index ) )\n"
+ "index = this._genIndexName( index );\n"
+ "var res = this._dbCommand( \"deleteIndexes\" ,{ index: index } );\n"
+ "this.resetIndexCache();\n"
+ "return res;}\n"
+ "DBCollection.prototype.copyTo = function( newName ){\n"
+ "return this.getDB().eval(\n"
+ "function( collName , newName ){\n"
+ "var from = db[collName];\n"
+ "var to = db[newName];\n"
+ "to.ensureIndex( { _id : 1 } );\n"
+ "var count = 0;\n"
+ "var cursor = from.find();\n"
+ "while ( cursor.hasNext() ){\n"
+ "var o = cursor.next();\n"
+ "count++;\n"
+ "to.save( o );}\n"
+ "return count;\n"
+ "} , this.getName() , newName\n"
+ ");}\n"
+ "DBCollection.prototype.getCollection = function( subName ){\n"
+ "return this._db.getCollection( this._shortName + \".\" + subName );}\n"
+ "DBCollection.prototype.stats = function( scale ){\n"
+ "return this._db.runCommand( { collstats : this._shortName , scale : scale } );}\n"
+ "DBCollection.prototype.dataSize = function(){\n"
+ "return this.stats().size;}\n"
+ "DBCollection.prototype.storageSize = function(){\n"
+ "return this.stats().storageSize;}\n"
+ "DBCollection.prototype.totalIndexSize = function( verbose ){\n"
+ "var stats = this.stats();\n"
+ "if (verbose){\n"
+ "for (var ns in stats.indexSizes){\n"
+ "print( ns + \"\\t\" + stats.indexSizes[ns] );}}\n"
+ "return stats.totalIndexSize;}\n"
+ "DBCollection.prototype.totalSize = function(){\n"
+ "var total = this.storageSize();\n"
+ "var mydb = this._db;\n"
+ "var shortName = this._shortName;\n"
+ "this.getIndexes().forEach(\n"
+ "function( spec ){\n"
+ "var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
+ "var mysize = coll.storageSize();\n"
+ "//print( coll + \"\\t\" + mysize + \"\\t\" + tojson( coll.validate() ) );\n"
+ "total += coll.dataSize();}\n"
+ ");\n"
+ "return total;}\n"
+ "DBCollection.prototype.convertToCapped = function( bytes ){\n"
+ "if ( ! bytes )\n"
+ "throw \"have to specify # of bytes\";\n"
+ "return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )}\n"
+ "DBCollection.prototype.exists = function(){\n"
+ "return this._db.system.namespaces.findOne( { name : this._fullName } );}\n"
+ "DBCollection.prototype.isCapped = function(){\n"
+ "var e = this.exists();\n"
+ "return ( e && e.options && e.options.capped ) ? true : false;}\n"
+ "DBCollection.prototype.distinct = function( keyString , query ){\n"
+ "var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
+ "if ( ! res.ok )\n"
+ "throw \"distinct failed: \" + tojson( res );\n"
+ "return res.values;}\n"
+ "DBCollection.prototype.group = function( params ){\n"
+ "params.ns = this._shortName;\n"
+ "return this._db.group( params );}\n"
+ "DBCollection.prototype.groupcmd = function( params ){\n"
+ "params.ns = this._shortName;\n"
+ "return this._db.groupcmd( params );}\n"
+ "MapReduceResult = function( db , o ){\n"
+ "Object.extend( this , o );\n"
+ "this._o = o;\n"
+ "this._keys = Object.keySet( o );\n"
+ "this._db = db;\n"
+ "this._coll = this._db.getCollection( this.result );}\n"
+ "MapReduceResult.prototype._simpleKeys = function(){\n"
+ "return this._o;}\n"
+ "MapReduceResult.prototype.find = function(){\n"
+ "return DBCollection.prototype.find.apply( this._coll , arguments );}\n"
+ "MapReduceResult.prototype.drop = function(){\n"
+ "return this._coll.drop();}\n"
+ "\n"
+ "MapReduceResult.prototype.convertToSingleObject = function(){\n"
+ "var z = {};\n"
+ "this._coll.find().forEach( function(a){ z[a._id] = a.value; } );\n"
+ "return z;}\n"
+ "\n"
+ "DBCollection.prototype.mapReduce = function( map , reduce , optional ){\n"
+ "var c = { mapreduce : this._shortName , map : map , reduce : reduce };\n"
+ "if ( optional )\n"
+ "Object.extend( c , optional );\n"
+ "var raw = this._db.runCommand( c );\n"
+ "if ( ! raw.ok )\n"
+ "throw \"map reduce failed: \" + tojson( raw );\n"
+ "return new MapReduceResult( this._db , raw );}\n"
+ "DBCollection.prototype.toString = function(){\n"
+ "return this.getFullName();}\n"
+ "DBCollection.prototype.toString = function(){\n"
+ "return this.getFullName();}\n"
+ "DBCollection.prototype.tojson = DBCollection.prototype.toString;\n"
+ "DBCollection.prototype.shellPrint = DBCollection.prototype.toString;\n"
+ ;
+
diff --git a/shell/servers.js b/shell/servers.js
index 7b306d7..eb548ea 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -993,6 +993,7 @@ ReplSetTest = function( opts ){
this.name = opts.name || "testReplSet";
this.host = opts.host || getHostName();
this.numNodes = opts.nodes || 0;
+ this.oplogSize = opts.oplogSize || 2;
this.useSeedList = opts.useSeedList || false;
this.bridged = opts.bridged || false;
@@ -1114,7 +1115,7 @@ ReplSetTest.prototype.getOptions = function( n , extra , putBinaryFirst ){
extra = {};
if ( ! extra.oplogSize )
- extra.oplogSize = "2";
+ extra.oplogSize = this.oplogSize;
var a = []
@@ -1124,8 +1125,12 @@ ReplSetTest.prototype.getOptions = function( n , extra , putBinaryFirst ){
a.push( "--replSet" );
-
- a.push( this.getURL() )
+ if( this.useSeedList ) {
+ a.push( this.getURL() );
+ }
+ else {
+ a.push( this.name );
+ }
a.push( "--noprealloc", "--smallfiles" );
@@ -1188,6 +1193,21 @@ ReplSetTest.prototype.callIsMaster = function() {
return master || false;
}
+ReplSetTest.prototype.awaitSecondaryNodes = function( timeout ) {
+ var master = this.getMaster();
+ var slaves = this.liveNodes.slaves;
+ var len = slaves.length;
+
+    this.attempt({context: this, timeout: timeout || 60000, desc: "Awaiting secondaries"}, function() {
+ var ready = true;
+ for(var i=0; i<len; i++) {
+ ready = ready && slaves[i].getDB("admin").runCommand({ismaster: 1})['secondary'];
+ }
+
+ return ready;
+ });
+}
+
ReplSetTest.prototype.getMaster = function( timeout ) {
var tries = 0;
var sleepTime = 500;
diff --git a/shell/shell_utils.cpp b/shell/shell_utils.cpp
index e968d73..5260015 100644
--- a/shell/shell_utils.cpp
+++ b/shell/shell_utils.cpp
@@ -439,7 +439,7 @@ namespace mongo {
try {
// This assumes there aren't any 0's in the mongo program output.
// Hope that's ok.
- const unsigned bufSize = 8192;
+ const unsigned bufSize = 64000;
char buf[ bufSize ];
char temp[ bufSize ];
char *start = buf;
diff --git a/tools/stat.cpp b/tools/stat.cpp
index 05318a9..d94cf8d 100644
--- a/tools/stat.cpp
+++ b/tools/stat.cpp
@@ -76,7 +76,7 @@ namespace mongo {
out << " faults/s \t- # of pages faults/sec (linux only)\n";
out << " locked \t- percent of time in global write lock\n";
out << " idx miss \t- percent of btree page misses (sampled)\n";
- out << " q t|r|w \t- lock queue lengths (total|read|write)\n";
+ out << " q t|r|w \t- ops waiting for lock from db.currentOp() (total|read|write)\n";
out << " conn \t- number of open connections\n";
}
diff --git a/util/concurrency/mutex.h b/util/concurrency/mutex.h
index 9ab3960..797ab77 100644
--- a/util/concurrency/mutex.h
+++ b/util/concurrency/mutex.h
@@ -115,7 +115,7 @@ namespace mongo {
}
}
};
- extern MutexDebugger mutexDebugger;
+ extern MutexDebugger &mutexDebugger;
// If you create a local static instance of this class, that instance will be destroyed
// before all global static objects are destroyed, so __destroyingStatics will be set
diff --git a/util/concurrency/task.cpp b/util/concurrency/task.cpp
index 05e43e5..99288bb 100644
--- a/util/concurrency/task.cpp
+++ b/util/concurrency/task.cpp
@@ -139,9 +139,14 @@ namespace mongo {
}
}
} catch(std::exception& e) {
- log() << "Server::doWork() exception " << e.what() << endl;
- } catch(...) {
- log() << "Server::doWork() unknown exception!" << endl;
+ log() << "Server::doWork task:" << name() << " exception:" << e.what() << endl;
+ }
+ catch(const char *p) {
+ log() << "Server::doWork task:" << name() << " unknown c exception:" <<
+ ((p&&strlen(p)<800)?p:"?") << endl;
+ }
+ catch(...) {
+ log() << "Server::doWork unknown exception task:" << name() << endl;
}
}
}
diff --git a/util/concurrency/vars.cpp b/util/concurrency/vars.cpp
index 2b46946..8863a27 100644
--- a/util/concurrency/vars.cpp
+++ b/util/concurrency/vars.cpp
@@ -23,7 +23,9 @@
namespace mongo {
mutex _atomicMutex("_atomicMutex");
- MutexDebugger mutexDebugger;
+
+ // intentional leak. otherwise destructor orders can be problematic at termination.
+ MutexDebugger &mutexDebugger = *(new MutexDebugger());
MutexDebugger::MutexDebugger() :
x( *(new boost::mutex()) ), magic(0x12345678) {
diff --git a/util/goodies.h b/util/goodies.h
index 1dfabdc..c43f356 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -61,7 +61,7 @@ namespace mongo {
o << '\n';
for (i = 0; i < size; i++)
o << ' ' << strings[i] << '\n';
-
+ o.flush();
free (strings);
}
#else
@@ -543,14 +543,16 @@ namespace mongo {
_num = _outof - used;
}
- int available(){
+ int available() const {
return _num;
}
- int used(){
+ int used() const {
return _outof - _num;
}
+ int outof() const { return _outof; }
+
private:
int _outof;
int _num;
diff --git a/util/log.cpp b/util/log.cpp
index bfd9154..334c66b 100644
--- a/util/log.cpp
+++ b/util/log.cpp
@@ -83,7 +83,7 @@ namespace mongo {
}
- FILE* tmp = fopen(_path.c_str(), (_append ? "a" : "w"));
+ FILE* tmp = freopen(_path.c_str(), (_append ? "a" : "w"), stdout);
if (!tmp){
cerr << "can't open: " << _path.c_str() << " for log file" << endl;
dbexit( EXIT_BADOPTIONS );
@@ -92,10 +92,6 @@ namespace mongo {
Logstream::setLogFile(tmp); // after this point no thread will be using old file
- if (_file){
- fclose(_file);
- }
-
_file = tmp;
_opened = time(0);
}
diff --git a/util/log.h b/util/log.h
index 1f11c81..87b7b7e 100644
--- a/util/log.h
+++ b/util/log.h
@@ -295,10 +295,13 @@ namespace mongo {
inline Nullstream& log() {
return Logstream::get().prolog();
}
+
+ inline Nullstream& error() {
+ return log( LL_ERROR );
+ }
- /* TODOCONCURRENCY */
- inline ostream& stdcout() {
- return cout;
+ inline Nullstream& warning() {
+ return log( LL_WARNING );
}
/* default impl returns "" -- mongod overrides */
@@ -326,40 +329,40 @@ namespace mongo {
s << "errno:" << x << ' ';
#if defined(_WIN32)
- LPTSTR errorText = NULL;
- FormatMessage(
- FORMAT_MESSAGE_FROM_SYSTEM
- |FORMAT_MESSAGE_ALLOCATE_BUFFER
- |FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- x, 0,
- (LPTSTR) &errorText, // output
- 0, // minimum size for output buffer
- NULL);
- if( errorText ) {
- string x = toUtf8String(errorText);
- for( string::iterator i = x.begin(); i != x.end(); i++ ) {
- if( *i == '\n' || *i == '\r' )
- break;
- s << *i;
- }
- LocalFree(errorText);
- }
- else
- s << strerror(x);
- /*
- DWORD n = FormatMessage(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, x,
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- (LPTSTR) &lpMsgBuf, 0, NULL);
- */
-#else
- s << strerror(x);
-#endif
- return s.str();
+ LPTSTR errorText = NULL;
+ FormatMessage(
+ FORMAT_MESSAGE_FROM_SYSTEM
+ |FORMAT_MESSAGE_ALLOCATE_BUFFER
+ |FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ x, 0,
+ (LPTSTR) &errorText, // output
+ 0, // minimum size for output buffer
+ NULL);
+ if( errorText ) {
+ string x = toUtf8String(errorText);
+ for( string::iterator i = x.begin(); i != x.end(); i++ ) {
+ if( *i == '\n' || *i == '\r' )
+ break;
+ s << *i;
+ }
+ LocalFree(errorText);
+ }
+ else
+ s << strerror(x);
+ /*
+ DWORD n = FormatMessage(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, x,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPTSTR) &lpMsgBuf, 0, NULL);
+ */
+#else
+ s << strerror(x);
+#endif
+ return s.str();
}
/** output the error # and error message with prefix.
diff --git a/util/message.cpp b/util/message.cpp
index cfff420..a809c1f 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -201,10 +201,12 @@ namespace mongo {
if ( x == ECONNABORTED || x == EBADF ) {
log() << "Listener on port " << _port << " aborted" << endl;
return;
- } if ( x == 0 && inShutdown() ){
+ }
+ if ( x == 0 && inShutdown() ) {
return; // socket closed
}
- log() << "Listener: accept() returns " << s << " " << errnoWithDescription(x) << endl;
+ if( !inShutdown() )
+ log() << "Listener: accept() returns " << s << " " << errnoWithDescription(x) << endl;
continue;
}
if (from.getType() != AF_UNIX)
@@ -255,14 +257,10 @@ namespace mongo {
_cur = _buf;
}
- int len() {
- return _cur - _buf;
- }
+ int len() const { return _cur - _buf; }
private:
-
MessagingPort* _port;
-
char * _buf;
char * _cur;
};
@@ -272,10 +270,13 @@ namespace mongo {
mongo::mutex m;
public:
Ports() : ports(), m("Ports") {}
- void closeAll() { \
+ void closeAll(unsigned skip_mask) {
scoped_lock bl(m);
- for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
+ for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
+ if( (*i)->tag & skip_mask )
+ continue;
(*i)->shutdown();
+ }
}
void insert(MessagingPort* p) {
scoped_lock bl(m);
@@ -291,18 +292,16 @@ namespace mongo {
// are being destructed during termination.
Ports& ports = *(new Ports());
-
-
- void closeAllSockets() {
- ports.closeAll();
+ void MessagingPort::closeAllSockets(unsigned mask) {
+ ports.closeAll(mask);
}
- MessagingPort::MessagingPort(int _sock, const SockAddr& _far) : sock(_sock), piggyBackData(0), farEnd(_far), _timeout() {
+ MessagingPort::MessagingPort(int _sock, const SockAddr& _far) : sock(_sock), piggyBackData(0), farEnd(_far), _timeout(), tag(0) {
_logLevel = 0;
ports.insert(this);
}
- MessagingPort::MessagingPort( int timeout, int ll ) {
+ MessagingPort::MessagingPort( int timeout, int ll ) : tag(0) {
_logLevel = ll;
ports.insert(this);
sock = -1;
@@ -341,7 +340,7 @@ namespace mongo {
sock = socket(farEnd.getType(), SOCK_STREAM, 0);
if ( sock == INVALID_SOCKET ) {
- log(_logLevel) << "ERROR: connect(): invalid socket? " << errnoWithDescription() << endl;
+ log(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
return false;
}
@@ -630,7 +629,6 @@ namespace mongo {
MSGID NextMsgId;
- bool usingClientIds = 0;
ThreadLocalValue<int> clientId;
struct MsgStart {
@@ -642,12 +640,6 @@ namespace mongo {
MSGID nextMessageId(){
MSGID msgid = NextMsgId++;
-
- if ( usingClientIds ){
- msgid = msgid & 0xFFFF;
- msgid = msgid | clientId.get();
- }
-
return msgid;
}
@@ -656,9 +648,6 @@ namespace mongo {
}
void setClientId( int id ){
- usingClientIds = true;
- id = id & 0xFFFF0000;
- massert( 10445 , "invalid id" , id );
clientId.set( id );
}
diff --git a/util/message.h b/util/message.h
index 13f31df..203ad83 100644
--- a/util/message.h
+++ b/util/message.h
@@ -30,7 +30,7 @@ namespace mongo {
class PiggyBackData;
typedef AtomicUInt MSGID;
- class Listener {
+ class Listener : boost::noncopyable {
public:
Listener(const string &ip, int p, bool logConnect=true ) : _port(p), _ip(ip), _logConnect(logConnect), _elapsedTime(0){ }
virtual ~Listener() {
@@ -74,7 +74,7 @@ namespace mongo {
static const Listener* _timeTracker;
};
- class AbstractMessagingPort {
+ class AbstractMessagingPort : boost::noncopyable {
public:
virtual ~AbstractMessagingPort() { }
virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
@@ -86,6 +86,7 @@ namespace mongo {
virtual int getClientId(){
int x = remotePort();
x = x << 16;
+ x |= ( ( 0xFF0 & (long long)this ) >> 8 ); // lowest byte in pointer often meaningless
return x;
}
};
@@ -135,6 +136,11 @@ namespace mongo {
int _timeout;
int _logLevel; // passed to log() when logging errors
+ static void closeAllSockets(unsigned tagMask = 0xffffffff);
+
+ /* ports can be tagged with various classes. see closeAllSockets(tag). defaults to 0. */
+ unsigned tag;
+
friend class PiggyBackData;
};
@@ -169,6 +175,30 @@ namespace mongo {
return "";
}
}
+
+ inline bool opIsWrite( int op ){
+ switch ( op ){
+
+ case 0:
+ case opReply:
+ case dbMsg:
+ case dbQuery:
+ case dbGetMore:
+ case dbKillCursors:
+ return false;
+
+ case dbUpdate:
+ case dbInsert:
+ case dbDelete:
+            return true;
+
+ default:
+ PRINT(op);
+ assert(0);
+            return false;
+ }
+
+ }
#pragma pack(1)
/* see http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol
diff --git a/util/mmap.h b/util/mmap.h
index f4875be..c954ef7 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -168,7 +168,9 @@ namespace mongo {
long length() {
return len;
}
-
+
+ string filename() const { return _filename; }
+
private:
static void updateLength( const char *filename, long &length );
diff --git a/util/mongoutils/README b/util/mongoutils/README
index d3d874b..ab614b6 100755
--- a/util/mongoutils/README
+++ b/util/mongoutils/README
@@ -1,7 +1,7 @@
-mongoutils namespace requirements:
+ mongoutils namespace requirements:
-(1) code is not database specific, rather, true utilities
-(2) are cross platform
-(3) may require boost headers, but not libs
-(4) are clean and easy to use in any c++ project without pulling in lots of other stuff
-(5) apache license
+ (1) code is not database specific, rather, true utilities
+ (2) are cross platform
+ (3) may require boost headers, but not libs
+ (4) are clean and easy to use in any c++ project without pulling in lots of other stuff
+ (5) apache license
diff --git a/util/optime.h b/util/optime.h
index 4645968..9214479 100644
--- a/util/optime.h
+++ b/util/optime.h
@@ -104,7 +104,7 @@ namespace mongo {
char buf[64];
time_t_to_String(secs, buf);
stringstream ss;
- ss << buf << ' ';
+ ss << time_t_to_String_short(secs) << ' ';
ss << hex << secs << ':' << i;
return ss.str();
}
diff --git a/util/sock.h b/util/sock.h
index 300c24d..4b4290d 100644
--- a/util/sock.h
+++ b/util/sock.h
@@ -253,9 +253,7 @@ namespace mongo {
class ListeningSockets {
public:
- ListeningSockets() : _mutex("ListeningSockets"), _sockets( new set<int>() ){
- }
-
+ ListeningSockets() : _mutex("ListeningSockets"), _sockets( new set<int>() ) { }
void add( int sock ){
scoped_lock lk( _mutex );
_sockets->insert( sock );
@@ -264,7 +262,6 @@ namespace mongo {
scoped_lock lk( _mutex );
_sockets->erase( sock );
}
-
void closeAll(){
set<int>* s;
{
@@ -272,17 +269,13 @@ namespace mongo {
s = _sockets;
_sockets = new set<int>();
}
-
- for ( set<int>::iterator i=s->begin(); i!=s->end(); i++ ){
+ for ( set<int>::iterator i=s->begin(); i!=s->end(); i++ ) {
int sock = *i;
- log() << "\t going to close listening socket: " << sock << endl;
+ log() << "closing listening socket: " << sock << endl;
closesocket( sock );
- }
-
+ }
}
-
static ListeningSockets* get();
-
private:
mongo::mutex _mutex;
set<int>* _sockets;
diff --git a/util/util.cpp b/util/util.cpp
index 02abfa9..b4b1053 100644
--- a/util/util.cpp
+++ b/util/util.cpp
@@ -156,20 +156,22 @@ namespace mongo {
}
printStackTrace();
}
-
+
+ /* note: can't use malloc herein - may be in signal handler.
+ logLockless() likely does not comply and should still be fixed todo
+ */
void rawOut( const string &s ) {
if( s.empty() ) return;
- boost::scoped_array<char> buf_holder(new char[32 + s.size()]);
- char * buf = buf_holder.get();
-
+ char buf[64];
time_t_to_String( time(0) , buf );
- buf[20] = ' ';
- strncpy( buf + 21 , s.c_str() , s.size() );
- buf[21+s.size()] = '\n';
- buf[21+s.size()+1] = 0;
+ /* truncate / don't show the year: */
+ buf[19] = ' ';
+ buf[20] = 0;
- Logstream::logLockless( buf );
+ Logstream::logLockless(buf);
+ Logstream::logLockless(s);
+ Logstream::logLockless("\n");
}
ostream& operator<<( ostream &s, const ThreadSafeString &o ){
diff --git a/util/version.cpp b/util/version.cpp
index bba3dd1..4987e19 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -14,7 +14,7 @@ namespace mongo {
// mongo processes version support
//
- const char versionString[] = "1.6.2";
+ const char versionString[] = "1.6.3";
string mongodVersion() {
stringstream ss;
@@ -61,26 +61,37 @@ namespace mongo {
// 32 bit systems warning
//
- void show_32_warning(){
+ void show_warnings(){
+ // each message adds a leading but not a trailing newline
+
bool warned = false;
{
const char * foo = strchr( versionString , '.' ) + 1;
int bar = atoi( foo );
if ( ( 2 * ( bar / 2 ) ) != bar ) {
cout << "\n** NOTE: This is a development version (" << versionString << ") of MongoDB.";
- cout << "\n** Not recommended for production. \n" << endl;
+ cout << "\n** Not recommended for production." << endl;
warned = true;
}
}
- if ( sizeof(int*) != 4 )
- return;
+ if ( sizeof(int*) == 4 ) {
+ cout << endl;
+ cout << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << endl;
+ cout << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << endl;
+ warned = true;
+ }
+
+#ifdef __linux__
+ if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")){
+ cout << endl;
+ cout << "** WARNING: You are running in OpenVZ. This is known to be broken!!!" << endl;
+ warned = true;
+ }
+#endif
- if( !warned ) // prettier this way
+ if (warned)
cout << endl;
- cout << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << endl;
- cout << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << endl;
- cout << endl;
}
}
diff --git a/util/version.h b/util/version.h
index 70ddeb8..ea22a35 100644
--- a/util/version.h
+++ b/util/version.h
@@ -17,7 +17,7 @@ namespace mongo {
string sysInfo();
void printSysInfo();
- void show_32_warning();
+ void show_warnings();
} // namespace mongo