 SConstruct                                     |   5
 buildscripts/cleanbb.py                        |  12
 buildscripts/utils.py                          |  12
 client/dbclient.cpp                            |   1
 client/dbclient_rs.cpp                         | 203
 client/dbclient_rs.h                           |  44
 client/dbclientcursor.cpp                      |  15
 client/parallel.cpp                            |  14
 client/redef_macros.h                          |  53
 client/undef_macros.h                          |  32
 db/cmdline.cpp                                 |   4
 db/commands/mr.cpp                             |   3
 db/db.cpp                                      |  16
 db/dbcommands.cpp                              |   1
 db/dur.cpp                                     |   2
 db/dur_commitjob.h                             |   4
 db/ops/update.cpp                              |  28
 db/ops/update.h                                |   3
 db/pdfile.cpp                                  |   5
 db/pdfile.h                                    |   2
 db/repl/manager.cpp                            |  10
 db/repl/rs.cpp                                 |   6
 dbtests/macrotests.cpp                         |  18
 dbtests/updatetests.cpp                        |  45
 doxygenConfig                                  |   2
 jstests/capped6.js                             |   2
 jstests/capped8.js                             |   2
 jstests/extent.js                              |   2
 jstests/extent2.js                             |  31
 jstests/indexb.js                              |   3
 jstests/objid6.js                              |   6
 jstests/repl/drop_dups.js                      |   9
 jstests/replsets/reconfig.js                   |  18
 jstests/replsets/replset_remove_node.js        |  13
 jstests/replsets/replsetprio1.js               |  75
 jstests/sharding/mongos_no_detect_sharding.js  |  42
 jstests/update3.js                             |   6
 pch.h                                          |   6
 rpm/mongo.spec                                 |   2
 s/config.cpp                                   |  97
 s/config_migrate.cpp                           |  13
 s/cursors.cpp                                  |  16
 s/d_migrate.cpp                                |  12
 s/security.cpp                                 |   4
 s/server.cpp                                   |   2
 s/strategy_shard.cpp                           |   4
 tools/top.cpp                                  |  10
 util/allocator.h                               |   2
 util/assert_util.h                             |   9
 util/logfile.cpp                               |  33
 util/mmap_win.cpp                              |  37
 util/version.cpp                               |   2
 52 files changed, 718 insertions(+), 280 deletions(-)
diff --git a/SConstruct b/SConstruct
index e536f8a..3d40050 100644
--- a/SConstruct
+++ b/SConstruct
@@ -714,7 +714,10 @@ if nix:
#make scons colorgcc friendly
env['ENV']['HOME'] = os.environ['HOME']
- env['ENV']['TERM'] = os.environ['TERM']
+ try:
+ env['ENV']['TERM'] = os.environ['TERM']
+ except KeyError:
+ pass
if linux and has_option( "sharedclient" ):
env.Append( LINKFLAGS=" -Wl,--as-needed -Wl,-zdefs " )
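
The SConstruct fix reads TERM only when it is actually set. For reference, a minimal C++ analogue of the same guard (illustrative only, not part of this patch): std::getenv returns a null pointer for an unset variable, so the lookup is checked before use.

    #include <cstdlib>
    #include <string>

    // Return the value of an environment variable, or a fallback when unset.
    std::string envOr(const char* name, const std::string& fallback) {
        const char* value = std::getenv(name);  // NULL when "name" is not set
        return value ? std::string(value) : fallback;
    }
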
diff --git a/buildscripts/cleanbb.py b/buildscripts/cleanbb.py
index 261519a..bfeafd2 100644
--- a/buildscripts/cleanbb.py
+++ b/buildscripts/cleanbb.py
@@ -1,13 +1,15 @@
import sys
-import os
+import os, os.path
import utils
import time
from optparse import OptionParser
-cwd = os.getcwd();
-if cwd.find("buildscripts" ) > 0 :
- cwd = cwd.partition( "buildscripts" )[0]
+# set cwd to the root mongo dir, one level up from this
+# file's location (if we're not already running from there)
+cwd = os.getcwd()
+if os.path.basename(cwd) == 'buildscripts':
+ cwd = os.path.dirname(cwd)
print( "cwd [" + cwd + "]" )
@@ -38,7 +40,7 @@ def killprocs( signal="" ):
if not shouldKill( x ):
continue
- pid = x.partition( " " )[0]
+ pid = x.split( " " )[0]
print( "killing: " + x )
utils.execsys( "/bin/kill " + signal + " " + pid )
killed = killed + 1
diff --git a/buildscripts/utils.py b/buildscripts/utils.py
index 91409c7..bde2b08 100644
--- a/buildscripts/utils.py
+++ b/buildscripts/utils.py
@@ -3,6 +3,8 @@ import re
import socket
import time
import os
+import sys
+
# various utilities that are handy
def getAllSourceFiles( arr=None , prefix="." ):
@@ -139,6 +141,14 @@ def smoke_python_name():
# then we assume that "python" points to a 2.5 or
# greater python VM. otherwise, explicitly use 2.5
# which we assume to be installed.
+ min_version_tuple = (2, 5)
+ try:
+ if sys.version_info >= min_version_tuple:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
import subprocess
version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
binaries = ['python2.5', 'python2.6', 'python2.7', 'python25', 'python26', 'python27', 'python']
@@ -150,7 +160,7 @@ def smoke_python_name():
match = version.search(stream)
if match:
versiontuple = tuple(map(int, match.group(1).split('.')))
- if versiontuple >= (2, 5):
+ if versiontuple >= min_version_tuple:
return binary
except:
pass
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 67ecea0..6b9631b 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -952,6 +952,7 @@ namespace mongo {
an exception. we should make it return void and just throw an exception anytime
it fails
*/
+ checkConnection();
try {
if ( !port().call(toSend, response) ) {
_failed = true;
diff --git a/client/dbclient_rs.cpp b/client/dbclient_rs.cpp
index 0189700..4a8112b 100644
--- a/client/dbclient_rs.cpp
+++ b/client/dbclient_rs.cpp
@@ -211,6 +211,7 @@ namespace mongo {
void ReplicaSetMonitor::notifyFailure( const HostAndPort& server ) {
scoped_lock lk( _lock );
+
if ( _master >= 0 && _master < (int)_nodes.size() ) {
if ( server == _nodes[_master].addr ) {
_nodes[_master].ok = false;
@@ -292,7 +293,7 @@ namespace mongo {
}
/**
- * notify the monitor that server has faild
+ * notify the monitor that server has failed
*/
void ReplicaSetMonitor::notifySlaveFailure( const HostAndPort& server ) {
int x = _find( server );
@@ -402,15 +403,15 @@ namespace mongo {
// Our host list may have changed while waiting for another thread in the meantime,
// so double-check here
- // TODO: Do we really need this much protection, this should be pretty rare and not triggered
- // from lots of threads, duping old behavior for safety
+ // TODO: Do we really need this much protection, this should be pretty rare and not
+ // triggered from lots of threads, duping old behavior for safety
if( ! _shouldChangeHosts( hostList, true ) ){
changed = false;
return;
}
- // LogLevel can be pretty low, since replica set reconfiguration should be pretty rare and we
- // want to record our changes
+ // LogLevel can be pretty low, since replica set reconfiguration should be pretty rare and
+ // we want to record our changes
log() << "changing hosts to " << hostList << " from " << _getServerAddress_inlock() << endl;
NodeDiff diff = _getHostDiff_inlock( hostList );
@@ -424,7 +425,6 @@ namespace mongo {
for( set<int>::reverse_iterator i = removed.rbegin(), end = removed.rend(); i != end; ++i ){
log() << "erasing host " << _nodes[ *i ] << " from replica set " << this->_name << endl;
-
_nodes.erase( _nodes.begin() + *i );
}
@@ -450,29 +450,52 @@ namespace mongo {
_nodes.push_back( Node( h , newConn ) );
}
-
}
-
- bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset ) {
- assert( c );
+ bool ReplicaSetMonitor::_checkConnection( DBClientConnection* conn,
+ string& maybePrimary, bool verbose, int nodesOffset ) {
+
+ assert( conn );
+
scoped_lock lk( _checkConnectionLock );
bool isMaster = false;
bool changed = false;
+ bool errorOccured = false;
+
+ if ( nodesOffset >= 0 ){
+ scoped_lock lk( _lock );
+ if ( !_checkConnMatch_inlock( conn, nodesOffset )) {
+ /* Another thread modified _nodes -> invariant broken.
+ * This also implies that another thread just passed
+ * through here and refreshed _nodes. So no need to do
+ * duplicate work.
+ */
+ return false;
+ }
+ }
+
try {
Timer t;
BSONObj o;
- c->isMaster(isMaster, &o);
+ conn->isMaster( isMaster, &o );
+
if ( o["setName"].type() != String || o["setName"].String() != _name ) {
- warning() << "node: " << c->getServerAddress() << " isn't a part of set: " << _name
+ warning() << "node: " << conn->getServerAddress()
+ << " isn't a part of set: " << _name
<< " ismaster: " << o << endl;
- if ( nodesOffset >= 0 )
+
+ if ( nodesOffset >= 0 ) {
+ scoped_lock lk( _lock );
_nodes[nodesOffset].ok = false;
+ }
+
return false;
}
if ( nodesOffset >= 0 ) {
+ scoped_lock lk( _lock );
+
_nodes[nodesOffset].pingTimeMillis = t.millis();
_nodes[nodesOffset].hidden = o["hidden"].trueValue();
_nodes[nodesOffset].secondary = o["secondary"].trueValue();
@@ -481,7 +504,8 @@ namespace mongo {
_nodes[nodesOffset].lastIsMaster = o.copy();
}
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << endl;
+ log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << conn->toString()
+ << ' ' << o << endl;
// add other nodes
BSONArrayBuilder b;
@@ -492,18 +516,25 @@ namespace mongo {
BSONObjIterator it( o["hosts"].Obj() );
while( it.more() ) b.append( it.next() );
}
+
if (o.hasField("passives") && o["passives"].type() == Array) {
BSONObjIterator it( o["passives"].Obj() );
while( it.more() ) b.append( it.next() );
}
_checkHosts( b.arr(), changed);
- _checkStatus(c);
+ _checkStatus( conn );
-
}
catch ( std::exception& e ) {
- log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception " << c->toString() << ' ' << e.what() << endl;
+ log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception "
+ << conn->toString() << ' ' << e.what() << endl;
+
+ errorOccured = true;
+ }
+
+ if ( errorOccured ) {
+ scoped_lock lk( _lock );
_nodes[nodesOffset].ok = false;
}
@@ -514,63 +545,114 @@ namespace mongo {
}
void ReplicaSetMonitor::_check( bool checkAllSecondaries ) {
-
- bool triedQuickCheck = false;
-
LOG(1) << "_check : " << getServerAddress() << endl;
int newMaster = -1;
+ shared_ptr<DBClientConnection> nodeConn;
for ( int retry = 0; retry < 2; retry++ ) {
- for ( unsigned i=0; i<_nodes.size(); i++ ) {
- shared_ptr<DBClientConnection> c;
+ bool triedQuickCheck = false;
+
+ if ( !checkAllSecondaries ) {
+ scoped_lock lk( _lock );
+ if ( _master >= 0 ) {
+ /* Nothing else to do since another thread already
+ * found the _master
+ */
+ return;
+ }
+ }
+
+ for ( unsigned i = 0; /* should not check while outside of lock! */ ; i++ ) {
{
scoped_lock lk( _lock );
- c = _nodes[i].conn;
+ if ( i >= _nodes.size() ) break;
+ nodeConn = _nodes[i].conn;
}
string maybePrimary;
- if ( _checkConnection( c.get() , maybePrimary , retry , i ) ) {
- _master = i;
- newMaster = i;
- if ( ! checkAllSecondaries )
- return;
+ if ( _checkConnection( nodeConn.get(), maybePrimary, retry, i ) ) {
+ scoped_lock lk( _lock );
+ if ( _checkConnMatch_inlock( nodeConn.get(), i )) {
+ _master = i;
+ newMaster = i;
+
+ if ( !checkAllSecondaries )
+ return;
+ }
+ else {
+ /*
+ * Somebody modified _nodes and most likely set the new
+ * _master, so try again.
+ */
+ break;
+ }
}
- if ( ! triedQuickCheck && maybePrimary.size() ) {
- int x = _find( maybePrimary );
- if ( x >= 0 ) {
+
+ if ( ! triedQuickCheck && ! maybePrimary.empty() ) {
+ int probablePrimaryIdx = -1;
+ shared_ptr<DBClientConnection> probablePrimaryConn;
+
+ {
+ scoped_lock lk( _lock );
+ probablePrimaryIdx = _find_inlock( maybePrimary );
+ probablePrimaryConn = _nodes[probablePrimaryIdx].conn;
+ }
+
+ if ( probablePrimaryIdx >= 0 ) {
triedQuickCheck = true;
+
string dummy;
- shared_ptr<DBClientConnection> testConn;
- {
+ if ( _checkConnection( probablePrimaryConn.get(), dummy,
+ false, probablePrimaryIdx ) ) {
+
scoped_lock lk( _lock );
- testConn = _nodes[x].conn;
- }
- if ( _checkConnection( testConn.get() , dummy , false , x ) ) {
- _master = x;
- newMaster = x;
- if ( ! checkAllSecondaries )
- return;
+
+ if ( _checkConnMatch_inlock( probablePrimaryConn.get(),
+ probablePrimaryIdx )) {
+
+ _master = probablePrimaryIdx;
+ newMaster = probablePrimaryIdx;
+
+ if ( ! checkAllSecondaries )
+ return;
+ }
+ else {
+ /*
+ * Somebody modified _nodes and most likely set the
+ * new _master, so try again.
+ */
+ break;
+ }
}
}
}
-
}
if ( newMaster >= 0 )
return;
- sleepsecs(1);
+ sleepsecs( 1 );
}
-
}
void ReplicaSetMonitor::check( bool checkAllSecondaries ) {
- // first see if the current master is fine
- if ( _master >= 0 ) {
+ shared_ptr<DBClientConnection> masterConn;
+
+ {
+ scoped_lock lk( _lock );
+
+ // first see if the current master is fine
+ if ( _master >= 0 ) {
+ masterConn = _nodes[_master].conn;
+ }
+ }
+
+ if ( masterConn.get() != NULL ) {
string temp;
- if ( _checkConnection( _nodes[_master].conn.get() , temp , false , _master ) ) {
+
+ if ( _checkConnection( masterConn.get(), temp, false, _master )) {
if ( ! checkAllSecondaries ) {
// current master is fine, so we're done
return;
@@ -588,21 +670,17 @@ namespace mongo {
}
int ReplicaSetMonitor::_find_inlock( const string& server ) const {
- for ( unsigned i=0; i<_nodes.size(); i++ )
- if ( _nodes[i].addr == server )
- return i;
- return -1;
- }
-
+ const size_t size = _nodes.size();
- int ReplicaSetMonitor::_find( const HostAndPort& server ) const {
- scoped_lock lk( _lock );
- for ( unsigned i=0; i<_nodes.size(); i++ )
- if ( _nodes[i].addr == server )
+ for ( unsigned i = 0; i < size; i++ ) {
+ if ( _nodes[i].addr == server ) {
return i;
+ }
+ }
+
return -1;
}
-
+
void ReplicaSetMonitor::appendInfo( BSONObjBuilder& b ) const {
scoped_lock lk( _lock );
BSONArrayBuilder hosts( b.subarrayStart( "hosts" ) );
@@ -622,6 +700,13 @@ namespace mongo {
b.append( "nextSlave" , _nextSlave );
}
+ bool ReplicaSetMonitor::_checkConnMatch_inlock( DBClientConnection* conn,
+ size_t nodeOffset ) const {
+
+ return ( nodeOffset < _nodes.size() &&
+ conn->getServerAddress() == _nodes[nodeOffset].conn->getServerAddress() );
+ }
+
mongo::mutex ReplicaSetMonitor::_setsLock( "ReplicaSetMonitor" );
map<string,ReplicaSetMonitorPtr> ReplicaSetMonitor::_sets;
@@ -645,6 +730,7 @@ namespace mongo {
// a master is selected. let's just make sure connection didn't die
if ( ! _master->isFailed() )
return _master.get();
+
_monitor->notifyFailure( _masterHost );
}
@@ -687,7 +773,6 @@ namespace mongo {
warning() << "cached auth failed for set: " << _monitor->getName() << " db: " << a.dbname << " user: " << a.username << endl;
}
-
}
DBClientConnection& DBClientReplicaSet::masterConn() {
@@ -794,6 +879,8 @@ namespace mongo {
}
auto_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( auto_ptr<DBClientCursor> result ){
+ if ( result.get() == NULL ) return result;
+
BSONObj error;
bool isError = result->peekError( &error );
if( ! isError ) return result;
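
The recurring shape in this file: copy what you need under _lock, release it for the slow isMaster() round trip, then reacquire and re-verify with _checkConnMatch_inlock() before writing anything back, because another thread may have rewritten _nodes in the gap. A minimal standalone sketch of that discipline (names are illustrative, not the driver's API):

    #include <mutex>
    #include <string>
    #include <vector>

    struct Node { std::string addr; bool ok; };

    class Monitor {
        std::mutex _lock;             // protects _nodes
        std::vector<Node> _nodes;

        // Valid only while _lock is held.
        bool matchesInLock(size_t i, const std::string& addr) const {
            return i < _nodes.size() && _nodes[i].addr == addr;
        }

    public:
        void checkNode(size_t i) {
            std::string addr;
            {
                std::lock_guard<std::mutex> lk(_lock);
                if (i >= _nodes.size()) return;
                addr = _nodes[i].addr;        // copy, then drop the lock
            }
            // ... slow network round trip happens here, unlocked ...
            std::lock_guard<std::mutex> lk(_lock);
            if (!matchesInLock(i, addr))      // invariant broken?
                return;                       // a peer refreshed _nodes; skip
            _nodes[i].ok = true;              // safe: index still matches
        }
    };
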
diff --git a/client/dbclient_rs.h b/client/dbclient_rs.h
index b68af29..bf91f09 100644
--- a/client/dbclient_rs.h
+++ b/client/dbclient_rs.h
@@ -107,6 +107,13 @@ namespace mongo {
*/
ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers );
+ /**
+ * Checks all connections from the host list and sets the current
+ * master.
+ *
+ * @param checkAllSecondaries if set to false, stop immediately when
+ * the master is found or when _master is not -1.
+ */
void _check( bool checkAllSecondaries );
/**
@@ -125,25 +132,50 @@ namespace mongo {
/**
* Updates host list.
- * @param c the connection to check
+ * Invariant: if nodesOffset is >= 0, _nodes[nodesOffset].conn should be
+ * equal to conn.
+ *
+ * @param conn the connection to check
* @param maybePrimary OUT
* @param verbose
* @param nodesOffset - offset into _nodes array, -1 for not in it
- * @return if the connection is good
+ *
+ * @return true if the connection is good or false if invariant
+ * is broken
*/
- bool _checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset );
+ bool _checkConnection( DBClientConnection* conn, string& maybePrimary,
+ bool verbose, int nodesOffset );
string _getServerAddress_inlock() const;
NodeDiff _getHostDiff_inlock( const BSONObj& hostList );
bool _shouldChangeHosts( const BSONObj& hostList, bool inlock );
-
+ /**
+ * @return the index to _nodes corresponding to the server address.
+ */
int _find( const string& server ) const ;
int _find_inlock( const string& server ) const ;
- int _find( const HostAndPort& server ) const ;
- mutable mongo::mutex _lock; // protects _nodes
+ /**
+ * Checks whether the given connection matches the connection stored in _nodes.
+ * Mainly used for sanity checking to confirm that nodeOffset still
+ * refers to the right connection after releasing and reacquiring
+ * a mutex.
+ */
+ bool _checkConnMatch_inlock( DBClientConnection* conn, size_t nodeOffset ) const;
+
+ // protects _nodes and indices pointing to it (_master & _nextSlave)
+ mutable mongo::mutex _lock;
+
+ /**
+ * "Synchronizes" the _checkConnection method. Should ideally be one mutex per
+ * connection object being used. The purpose of this lock is to make sure that
+ * the reply from the connection the lock holder got is the actual response
+ * to what it sent.
+ *
+ * Deadlock WARNING: never acquire this while holding _lock
+ */
mutable mongo::mutex _checkConnectionLock;
string _name;
diff --git a/client/dbclientcursor.cpp b/client/dbclientcursor.cpp
index 5db360e..9e7e8a6 100644
--- a/client/dbclientcursor.cpp
+++ b/client/dbclientcursor.cpp
@@ -290,12 +290,23 @@ namespace mongo {
m.setData( dbKillCursors , b.buf() , b.len() );
if ( _client ) {
- _client->sayPiggyBack( m );
+
+ // Kill the cursor the same way the connection itself would. Usually, non-lazily
+ if( DBClientConnection::getLazyKillCursor() )
+ _client->sayPiggyBack( m );
+ else
+ _client->say( m );
+
}
else {
assert( _scopedHost.size() );
ScopedDbConnection conn( _scopedHost );
- conn->sayPiggyBack( m );
+
+ if( DBClientConnection::getLazyKillCursor() )
+ conn->sayPiggyBack( m );
+ else
+ conn->say( m );
+
conn.done();
}
}
diff --git a/client/parallel.cpp b/client/parallel.cpp
index 3a33eb5..d7975a6 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -503,7 +503,19 @@ namespace mongo {
0 , // nToSkip
_fields.isEmpty() ? 0 : &_fields , // fieldsToReturn
_options ,
- _batchSize == 0 ? 0 : _batchSize + _needToSkip // batchSize
+ // NtoReturn is weird.
+ // If zero, it means use default size, so we do that for all cursors
+ // If positive, it's the batch size (we don't want this cursor limiting results), that's
+ // done at a higher level
+ // If negative, it's the batch size, but we don't create a cursor - so we don't want
+ // to create a child cursor either.
+ // Either way, if non-zero, we want to pull back the batch size + the skip amount as
+ // quickly as possible. Potentially, for a cursor on a single shard or if we keep better
+ // track of chunks, we can actually add the skip value into the cursor and/or make some
+ // assumptions about the return value size ( (batch size + skip amount) / num_servers ).
+ _batchSize == 0 ? 0 :
+ ( _batchSize > 0 ? _batchSize + _needToSkip :
+ _batchSize - _needToSkip ) // batchSize
) );
try{
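
Restating the arithmetic in that batchSize argument as a standalone helper (a sketch; the function name is made up): zero keeps the server default, a positive batch grows by the skip amount, and a negative batch (a single-batch request) grows in magnitude the same way.

    // Effective child-cursor batch size for a requested batch size and skip.
    int childBatchSize(int batchSize, int needToSkip) {
        if (batchSize == 0)
            return 0;                        // zero keeps the server default
        if (batchSize > 0)
            return batchSize + needToSkip;   // normal cursor: widen by the skip
        return batchSize - needToSkip;       // negative (single batch): widen
    }                                        // its magnitude the same way
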
diff --git a/client/redef_macros.h b/client/redef_macros.h
index 897912d..5a39561 100644
--- a/client/redef_macros.h
+++ b/client/redef_macros.h
@@ -1,4 +1,4 @@
-/** @file redef_macros.h macros the implementation uses.
+/** @file redef_macros.h macros for mongo internals
@see undef_macros.h undefines these after use to minimize name pollution.
*/
@@ -20,42 +20,83 @@
// If you define a new global un-prefixed macro, please add it here and in undef_macros
-// #pragma once // this file is intended to be processed multiple times
-
-#if defined(MONGO_MACROS_CLEANED)
+#define MONGO_MACROS_PUSHED 1
// util/allocator.h
+#pragma push_macro("malloc")
+#undef malloc
#define malloc MONGO_malloc
+#pragma push_macro("realloc")
+#undef realloc
#define realloc MONGO_realloc
// util/assert_util.h
+#pragma push_macro("assert")
+#undef assert
#define assert MONGO_assert
+#pragma push_macro("verify")
+#undef verify
+#define verify MONGO_verify
+#pragma push_macro("dassert")
+#undef dassert
#define dassert MONGO_dassert
+#pragma push_macro("wassert")
+#undef wassert
#define wassert MONGO_wassert
+#pragma push_macro("massert")
+#undef massert
#define massert MONGO_massert
+#pragma push_macro("uassert")
+#undef uassert
#define uassert MONGO_uassert
#define BOOST_CHECK_EXCEPTION MONGO_BOOST_CHECK_EXCEPTION
+#pragma push_macro("DESTRUCTOR_GUARD")
+#undef DESTRUCTOR_GUARD
#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
// util/goodies.h
+#pragma push_macro("PRINT")
+#undef PRINT
#define PRINT MONGO_PRINT
+#pragma push_macro("PRINTFL")
+#undef PRINTFL
#define PRINTFL MONGO_PRINTFL
+#pragma push_macro("asctime")
+#undef asctime
#define asctime MONGO_asctime
+#pragma push_macro("gmtime")
+#undef gmtime
#define gmtime MONGO_gmtime
+#pragma push_macro("localtime")
+#undef localtime
#define localtime MONGO_localtime
+#pragma push_macro("ctime")
+#undef ctime
#define ctime MONGO_ctime
// util/debug_util.h
+#pragma push_macro("DEV")
+#undef DEV
#define DEV MONGO_DEV
+#pragma push_macro("DEBUGGING")
+#undef DEBUGGING
#define DEBUGGING MONGO_DEBUGGING
+#pragma push_macro("SOMETIMES")
+#undef SOMETIMES
#define SOMETIMES MONGO_SOMETIMES
+#pragma push_macro("OCCASIONALLY")
+#undef OCCASIONALLY
#define OCCASIONALLY MONGO_OCCASIONALLY
+#pragma push_macro("RARELY")
+#undef RARELY
#define RARELY MONGO_RARELY
+#pragma push_macro("ONCE")
+#undef ONCE
#define ONCE MONGO_ONCE
// util/log.h
+#pragma push_macro("LOG")
+#undef LOG
#define LOG MONGO_LOG
-#undef MONGO_MACROS_CLEANED
-#endif
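
The rewritten header leans on #pragma push_macro/pop_macro (supported by GCC, Clang, and MSVC) instead of the old MONGO_MACROS_CLEANED flag. A self-contained demonstration of the mechanism:

    #include <cstdio>

    #define ID "outer"

    #pragma push_macro("ID")    // save the current definition of ID
    #undef ID
    #define ID "inner"
    // ... code that needs the temporary meaning would go here ...
    #undef ID
    #pragma pop_macro("ID")     // restore the saved definition

    int main() {
        std::printf("%s\n", ID);    // prints "outer"
        return 0;
    }
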
diff --git a/client/undef_macros.h b/client/undef_macros.h
index 30ece61..c880f06 100644
--- a/client/undef_macros.h
+++ b/client/undef_macros.h
@@ -19,43 +19,65 @@
// #pragma once // this file is intended to be processed multiple times
+#if !defined (MONGO_EXPOSE_MACROS)
-/** MONGO_EXPOSE_MACROS - when defined, indicates that you are compiling a mongo program rather
- than just using the C++ driver.
-*/
-#if !defined(MONGO_EXPOSE_MACROS) && !defined(MONGO_MACROS_CLEANED)
+#ifdef MONGO_MACROS_PUSHED
// util/allocator.h
#undef malloc
+#pragma pop_macro("malloc")
#undef realloc
+#pragma pop_macro("realloc")
// util/assert_util.h
#undef assert
+#pragma pop_macro("assert")
#undef dassert
+#pragma pop_macro("dassert")
#undef wassert
+#pragma pop_macro("wassert")
#undef massert
+#pragma pop_macro("massert")
#undef uassert
+#pragma pop_macro("uassert")
#undef BOOST_CHECK_EXCEPTION
+#undef verify
+#pragma pop_macro("verify")
#undef DESTRUCTOR_GUARD
+#pragma pop_macro("DESTRUCTOR_GUARD")
// util/goodies.h
#undef PRINT
+#pragma pop_macro("PRINT")
#undef PRINTFL
+#pragma pop_macro("PRINTFL")
#undef asctime
+#pragma pop_macro("asctime")
#undef gmtime
+#pragma pop_macro("gmtime")
#undef localtime
+#pragma pop_macro("localtime")
#undef ctime
+#pragma pop_macro("ctime")
// util/debug_util.h
#undef DEV
+#pragma pop_macro("DEV")
#undef DEBUGGING
+#pragma pop_macro("DEBUGGING")
#undef SOMETIMES
+#pragma pop_macro("SOMETIMES")
#undef OCCASIONALLY
+#pragma pop_macro("OCCASIONALLY")
#undef RARELY
+#pragma pop_macro("RARELY")
#undef ONCE
+#pragma pop_macro("ONCE")
// util/log.h
#undef LOG
+#pragma pop_macro("LOG")
-#define MONGO_MACROS_CLEANED
+#undef MONGO_MACROS_PUSHED
+#endif
#endif
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index fd759a7..346a9ae 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -239,7 +239,7 @@ namespace mongo {
cmdLine.noUnixSocket = true;
}
- if (params.count("fork")) {
+ if (params.count("fork") && !params.count("shutdown")) {
if ( ! params.count( "logpath" ) ) {
cout << "--fork has to be used with --logpath" << endl;
::exit(-1);
@@ -304,7 +304,7 @@ namespace mongo {
}
#endif
- if (params.count("logpath")) {
+ if (params.count("logpath") && !params.count("shutdown")) {
if ( logpath.size() == 0 )
logpath = params["logpath"].as<string>();
uassert( 10033 , "logpath has to be non-zero" , logpath.size() );
diff --git a/db/commands/mr.cpp b/db/commands/mr.cpp
index b79e62b..30fa2a4 100644
--- a/db/commands/mr.cpp
+++ b/db/commands/mr.cpp
@@ -940,6 +940,7 @@ namespace mongo {
log(1) << "mr ns: " << config.ns << endl;
+ uassert( 16149 , "cannot run map reduce without the js engine", globalScriptEngine );
bool shouldHaveData = false;
long long num = 0;
@@ -1206,7 +1207,7 @@ namespace mongo {
BSONObj res = config.reducer->finalReduce( values , config.finalizer.get());
if (state.isOnDisk())
- state.insertToInc(res);
+ state.insert( config.tempLong , res );
else
state.emit(res);
values.clear();
diff --git a/db/db.cpp b/db/db.cpp
index b1d1db8..0a21f4d 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -989,22 +989,6 @@ int main(int argc, char* argv[]) {
procPath = (str::stream() << "/proc/" << pid);
if (!boost::filesystem::exists(procPath))
failed = true;
-
- string exePath = procPath + "/exe";
- if (boost::filesystem::exists(exePath)){
- char buf[256];
- int ret = readlink(exePath.c_str(), buf, sizeof(buf)-1);
- buf[ret] = '\0'; // readlink doesn't terminate string
- if (ret == -1) {
- int e = errno;
- cerr << "Error resolving " << exePath << ": " << errnoWithDescription(e);
- failed = true;
- }
- else if (!endsWith(buf, "mongod")){
- cerr << "Process " << pid << " is running " << buf << " not mongod" << endl;
- ::exit(-1);
- }
- }
}
catch (const std::exception& e){
cerr << "Error reading pid from lock file [" << name << "]: " << e.what() << endl;
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index fc6327c..fb6a902 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1760,6 +1760,7 @@ namespace mongo {
virtual bool slaveOk() const { return false; }
virtual LockType locktype() const { return WRITE; }
virtual bool requiresAuth() { return true; }
+ virtual bool logTheOp() { return true; }
virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "emptycapped" ].valuestrsafe();
uassert( 13428, "emptycapped must specify a collection", !coll.empty() );
diff --git a/db/dur.cpp b/db/dur.cpp
index 4861773..3e2c3ec 100644
--- a/db/dur.cpp
+++ b/db/dur.cpp
@@ -83,7 +83,7 @@ namespace mongo {
*/
static void groupCommit();
- CommitJob commitJob;
+ CommitJob& commitJob = *(new CommitJob()); // don't destroy
Stats stats;
diff --git a/db/dur_commitjob.h b/db/dur_commitjob.h
index a5f8515..ab62c2a 100644
--- a/db/dur_commitjob.h
+++ b/db/dur_commitjob.h
@@ -164,6 +164,8 @@ namespace mongo {
CommitJob();
+ ~CommitJob(){ assert(!"shouldn't destroy CommitJob!"); }
+
/** record/note an intent to write */
void note(void* p, int len);
@@ -212,7 +214,7 @@ namespace mongo {
unsigned _nSinceCommitIfNeededCall;
};
- extern CommitJob commitJob;
+ extern CommitJob& commitJob;
}
}
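
Turning commitJob into a reference to a heap allocation that is never freed is the deliberate-leak singleton idiom: the object now has no static destructor, so durability threads can still touch it while other statics are torn down at shutdown. A minimal sketch of the idiom (names illustrative):

    struct Job {
        // Running this during static destruction would race with
        // background threads that still hold a reference.
        ~Job() {}
    };

    // Bound to a deliberate leak: no destructor is ever registered,
    // so theJob stays valid for the life of the process.
    Job& theJob = *(new Job());
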
diff --git a/db/ops/update.cpp b/db/ops/update.cpp
index 6a7aad4..4fb8750 100644
--- a/db/ops/update.cpp
+++ b/db/ops/update.cpp
@@ -642,6 +642,14 @@ namespace mongo {
}
+ bool ModSetState::duplicateFieldName( const BSONElement &a, const BSONElement &b ) {
+ return
+ !a.eoo() &&
+ !b.eoo() &&
+ ( a.rawdata() != b.rawdata() ) &&
+ ( a.fieldName() == string( b.fieldName() ) );
+ }
+
template< class Builder >
void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ) {
DEBUGUPDATE( "\t\t createNewFromMods root: " << root );
@@ -654,8 +662,18 @@ namespace mongo {
ModStateHolder::iterator mend = _mods.lower_bound( buf.str() );
set<string> onedownseen;
-
+ BSONElement prevE;
while ( e.type() && m != mend ) {
+
+ if ( duplicateFieldName( prevE, e ) ) {
+ // Just copy through an element with a duplicate field name.
+ b.append( e );
+ prevE = e;
+ e = es.next();
+ continue;
+ }
+ prevE = e;
+
string field = root + e.fieldName();
FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
@@ -684,11 +702,9 @@ namespace mongo {
m++;
}
else {
- // this is a very weird case
- // have seen it in production, but can't reproduce
- // this assert prevents an inf. loop
- // but likely isn't the correct solution
- assert(0);
+ massert( 16062 , "ModSet::createNewFromMods - "
+ "SERVER-4777 unhandled duplicate field" , 0 );
+
}
continue;
}
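
The new duplicateFieldName() test reduces to: both elements are real (not end-of-object), they are distinct elements backed by different bytes, and they spell the same field name. The same predicate over a plain struct (a sketch, not the BSONElement API):

    #include <cstring>

    struct Elem {
        const char* data;   // pointer into the backing buffer
        const char* name;   // field name
        bool        eoo;    // true past the end of the object
    };

    bool duplicateFieldName(const Elem& a, const Elem& b) {
        return !a.eoo && !b.eoo &&
               a.data != b.data &&                // distinct elements...
               std::strcmp(a.name, b.name) == 0;  // ...same field name
    }
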
diff --git a/db/ops/update.h b/db/ops/update.h
index de5805a..73e4437 100644
--- a/db/ops/update.h
+++ b/db/ops/update.h
@@ -623,6 +623,9 @@ namespace mongo {
}
+ /** @return true iff the elements aren't eoo(), are distinct, and share a field name. */
+ static bool duplicateFieldName( const BSONElement &a, const BSONElement &b );
+
public:
bool canApplyInPlace() const {
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index ac7731a..60914d9 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -434,6 +434,7 @@ namespace mongo {
}
Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
+ assert( approxSize <= Extent::maxSize() );
{
// make sizes align with VM page size
int newSize = (approxSize + 0xfff) & 0xfffff000;
@@ -491,6 +492,10 @@ namespace mongo {
// overflowed
high = max(approxSize, Extent::maxSize());
}
+ if ( high <= Extent::minSize() ) {
+ // the minimum extent size is 4097
+ high = Extent::minSize() + 1;
+ }
int n = 0;
Extent *best = 0;
int bestDiff = 0x7fffffff;
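
For reference, the size rounding createExtent applies just above the new assert, pulled out on its own: extent sizes are rounded up to the 4 KB VM page size, which is also why minSize() moves from 0x100 to 0x1000 below.

    #include <cassert>

    // Round a requested extent size up to a multiple of the 4 KB page size.
    int alignToPage(int approxSize) {
        return (approxSize + 0xfff) & 0xfffff000;
    }

    int main() {
        assert(alignToPage(1)      == 0x1000);
        assert(alignToPage(0x1000) == 0x1000);
        assert(alignToPage(0x1001) == 0x2000);
        return 0;
    }
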
diff --git a/db/pdfile.h b/db/pdfile.h
index 64dba68..2652f54 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -313,7 +313,7 @@ namespace mongo {
Extent* getPrevExtent() { return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev); }
static int maxSize();
- static int minSize() { return 0x100; }
+ static int minSize() { return 0x1000; }
/**
* @param len lengt of record we need
* @param lastRecord size of last extent which is a factor in next extent size
diff --git a/db/repl/manager.cpp b/db/repl/manager.cpp
index c91adc3..fb9b6cb 100644
--- a/db/repl/manager.cpp
+++ b/db/repl/manager.cpp
@@ -98,8 +98,14 @@ namespace mongo {
const Member *primary = rs->box.getPrimary();
if (primary && highestPriority &&
- highestPriority->config().priority > primary->config().priority) {
- log() << "stepping down " << primary->fullName() << endl;
+ highestPriority->config().priority > primary->config().priority &&
+ // if we're stepping down to allow another member to become primary, we
+ // better have another member (otherOp), and it should be up-to-date
+ otherOp != 0 && highestPriority->hbinfo().opTime.getSecs() >= otherOp - 10) {
+ log() << "stepping down " << primary->fullName() << " (priority " <<
+ primary->config().priority << "), " << highestPriority->fullName() <<
+ " is priority " << highestPriority->config().priority << " and " <<
+ (otherOp - highestPriority->hbinfo().opTime.getSecs()) << " seconds behind" << endl;
if (primary->h().isSelf()) {
// replSetStepDown tries to acquire the same lock
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index 23abc24..9181fe9 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -119,8 +119,6 @@ namespace mongo {
return max;
}
- const bool closeOnRelinquish = true;
-
void ReplSetImpl::relinquish() {
LOG(2) << "replSet attempting to relinquish" << endl;
if( box.getState().primary() ) {
@@ -129,9 +127,7 @@ namespace mongo {
log() << "replSet relinquishing primary state" << rsLog;
changeState(MemberState::RS_SECONDARY);
- }
-
- if( closeOnRelinquish ) {
+
/* close sockets that were talking to us so they don't blithly send many writes that will fail
with "not master" (of course client could check result code, but in case they are not)
*/
diff --git a/dbtests/macrotests.cpp b/dbtests/macrotests.cpp
index f547c85..36167b0 100644
--- a/dbtests/macrotests.cpp
+++ b/dbtests/macrotests.cpp
@@ -22,26 +22,16 @@
# error malloc defined 0
#endif
-#ifdef assert
-# error assert defined 1
-#endif
-
-#include "../client/parallel.h" //uses assert
-
-#ifdef assert
-# error assert defined 2
-#endif
-
#include "../client/redef_macros.h"
-#ifndef assert
-# error assert not defined 3
+#ifndef malloc
+# error malloc not defined
#endif
#include "../client/undef_macros.h"
-#ifdef assert
-# error assert defined 3
+#ifdef malloc
+# error malloc defined 1
#endif
diff --git a/dbtests/updatetests.cpp b/dbtests/updatetests.cpp
index c912bf4..95476ef 100644
--- a/dbtests/updatetests.cpp
+++ b/dbtests/updatetests.cpp
@@ -519,6 +519,48 @@ namespace UpdateTests {
}
};
+ /** SERVER-4777 */
+ class TwoModsWithinDuplicatedField : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "_id" << 0 << "a" << 1
+ << "x" << BSONObj() << "x" << BSONObj()
+ << "z" << 5 ) );
+ client().update( ns(), BSONObj(), BSON( "$set" << BSON( "x.b" << 1 << "x.c" << 1 ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1
+ << "x" << BSON( "b" << 1 << "c" << 1 ) << "x" << BSONObj()
+ << "z" << 5 ),
+ client().findOne( ns(), BSONObj() ) );
+ }
+ };
+
+ /** SERVER-4777 */
+ class ThreeModsWithinDuplicatedField : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(),
+ BSON( "_id" << 0
+ << "x" << BSONObj() << "x" << BSONObj() << "x" << BSONObj() ) );
+ client().update( ns(), BSONObj(),
+ BSON( "$set" << BSON( "x.b" << 1 << "x.c" << 1 << "x.d" << 1 ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0
+ << "x" << BSON( "b" << 1 << "c" << 1 << "d" << 1 )
+ << "x" << BSONObj() << "x" << BSONObj() ),
+ client().findOne( ns(), BSONObj() ) );
+ }
+ };
+
+ class TwoModsBeforeExistingField : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "_id" << 0 << "x" << 5 ) );
+ client().update( ns(), BSONObj(),
+ BSON( "$set" << BSON( "a" << 1 << "b" << 1 << "x" << 10 ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 << "b" << 1 << "x" << 10 ),
+ client().findOne( ns(), BSONObj() ) );
+ }
+ };
+
namespace ModSetTests {
class internal1 {
@@ -854,6 +896,9 @@ namespace UpdateTests {
add< PreserveIdWithIndex >();
add< CheckNoMods >();
add< UpdateMissingToNull >();
+ add< TwoModsWithinDuplicatedField >();
+ add< ThreeModsWithinDuplicatedField >();
+ add< TwoModsBeforeExistingField >();
add< ModSetTests::internal1 >();
add< ModSetTests::inc1 >();
diff --git a/doxygenConfig b/doxygenConfig
index 55cee20..afe9177 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 2.0.4
+PROJECT_NUMBER = 2.0.5
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/capped6.js b/jstests/capped6.js
index 098f667..5db12b2 100644
--- a/jstests/capped6.js
+++ b/jstests/capped6.js
@@ -105,3 +105,5 @@ tzz = db.capped6;
for( var i = 0; i < 10; ++i ) {
doTest();
}
+
+tzz.drop();
diff --git a/jstests/capped8.js b/jstests/capped8.js
index e5b28dc..0f30e37 100644
--- a/jstests/capped8.js
+++ b/jstests/capped8.js
@@ -104,3 +104,5 @@ print("pass " + pass++);
t.drop();
db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000 ] } );
testTruncate();
+
+t.drop();
diff --git a/jstests/extent.js b/jstests/extent.js
index 8fca699..47ae868 100644
--- a/jstests/extent.js
+++ b/jstests/extent.js
@@ -7,5 +7,5 @@ for ( var i=0; i<50; i++ ) { // enough iterations to break 32 bit.
assert( t.count() == 1 );
t.drop();
}
+t.drop();
-db.dropDatabase();
diff --git a/jstests/extent2.js b/jstests/extent2.js
new file mode 100644
index 0000000..884c3ca
--- /dev/null
+++ b/jstests/extent2.js
@@ -0,0 +1,31 @@
+
+
+mydb = db.getSisterDB( "test_extent2" );
+mydb.dropDatabase();
+
+t = mydb.foo;
+e = mydb["$freelist"]
+
+function insert(){
+ t.insert( { _id : 1 , x : 1 } )
+ t.insert( { _id : 2 , x : 1 } )
+ t.insert( { _id : 3 , x : 1 } )
+ t.ensureIndex( { x : 1 } );
+}
+
+insert();
+t.drop();
+
+start = e.stats();
+
+for ( i=0; i<100; i++ ) {
+ insert();
+ t.drop();
+}
+
+end = e.stats();
+
+printjson( start );
+printjson( end )
+assert.eq( 4 , start.numExtents );
+assert.eq( 4 , end.numExtents );
diff --git a/jstests/indexb.js b/jstests/indexb.js
index 5507fee..d7d2e8c 100644
--- a/jstests/indexb.js
+++ b/jstests/indexb.js
@@ -5,8 +5,7 @@
// when it doesn't move
-t = db.indexb;t = db.indexb;
-db.dropDatabase();
+t = db.indexb;
t.drop();
t.ensureIndex({a:1},true);
diff --git a/jstests/objid6.js b/jstests/objid6.js
index c414ff0..b90dc9e 100644
--- a/jstests/objid6.js
+++ b/jstests/objid6.js
@@ -6,9 +6,11 @@ b = new ObjectId("4c17f616a707428966a2801c");
assert.eq(a.getTimestamp(), b.getTimestamp() , "A" );
x = Math.floor( (new Date()).getTime() / 1000 );
+sleep(10/*ms*/)
a = new ObjectId();
+sleep(10/*ms*/)
z = Math.floor( (new Date()).getTime() / 1000 );
y = a.getTimestamp().getTime() / 1000;
-assert( x <= y , "B" );
-assert( y <= z , "C" );
+assert.lte( x , y , "B" );
+assert.lte( y , z , "C" );
diff --git a/jstests/repl/drop_dups.js b/jstests/repl/drop_dups.js
index 100f469..ccfb768 100644
--- a/jstests/repl/drop_dups.js
+++ b/jstests/repl/drop_dups.js
@@ -40,14 +40,7 @@ function run( createInBackground ) {
function mymap(z) {
return z._id + ":" + z.x + ",";
- }
-
-
- if ( am.serverStatus().mem.bits == 64 ) {
- assert.neq( tojson(am[collName].find().map(mymap)) ,
- tojson(as[collName].find().map(mymap)) , "order is not supposed to be same on master and slave but it is" );
- }
-
+ }
am[collName].ensureIndex( { x : 1 } , { unique : true , dropDups : true , background : createInBackground } );
am.blah.insert( { x : 1 } )
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
index 55ee505..7d43720 100644
--- a/jstests/replsets/reconfig.js
+++ b/jstests/replsets/reconfig.js
@@ -60,10 +60,18 @@ replTest.stopSet();
replTest2 = new ReplSetTest({name : 'testSet2', nodes : 1});
nodes = replTest2.startSet();
-result = nodes[0].getDB("admin").runCommand({replSetInitiate : {_id : "testSet2", members : [
- {_id : 0, tags : ["member0"]}
- ]}});
-
-assert(result.errmsg.match(/bad or missing host field/) , "error message doesn't match, got result:" + tojson(result) );
+assert.soon(function() {
+ try {
+ result = nodes[0].getDB("admin").runCommand({replSetInitiate : {_id : "testSet2", members : [
+ {_id : 0, tags : ["member0"]}
+ ]}});
+ printjson(result);
+ return result.errmsg.match(/bad or missing host field/);
+ }
+ catch (e) {
+ print(e);
+ }
+ return false;
+});
replTest2.stopSet();
diff --git a/jstests/replsets/replset_remove_node.js b/jstests/replsets/replset_remove_node.js
index 9fef721..bf99b12 100644
--- a/jstests/replsets/replset_remove_node.js
+++ b/jstests/replsets/replset_remove_node.js
@@ -1,6 +1,6 @@
doTest = function( signal ) {
- // Make sure that we can manually shutdown a remove a
+ // Make sure that we can manually shutdown a remove a
// slave from the configuration.
// Create a new replica set test. Specify set name and the number of nodes you want.
@@ -12,7 +12,14 @@ doTest = function( signal ) {
// Call initiate() to send the replSetInitiate command
// This will wait for initiation
- replTest.initiate();
+ var name = replTest.nodeList();
+ replTest.initiate({"_id" : "testSet",
+ "members" : [
+ // make sure 0 becomes primary so we don't try to remove the
+ // primary below
+ {"_id" : 0, "host" : name[0], priority:2},
+ {"_id" : 1, "host" : name[1]},
+ {"_id" : 2, "host" : name[2]}]});
// Call getMaster to return a reference to the node that's been
// elected master.
@@ -41,7 +48,7 @@ doTest = function( signal ) {
print(e);
}
-
+
// Make sure that a new master comes up
master = replTest.getMaster();
slaves = replTest.liveNodes.slaves;
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
index a002476..aeeb749 100644
--- a/jstests/replsets/replsetprio1.js
+++ b/jstests/replsets/replsetprio1.js
@@ -1,4 +1,3 @@
-// FAILING TEST
// should check that election happens in priority order
doTest = function( signal ) {
@@ -7,47 +6,55 @@ doTest = function( signal ) {
var nodes = replTest.nodeList();
replTest.startSet();
- replTest.node[0].initiate({"_id" : "unicomplex",
+ replTest.initiate({"_id" : "testSet",
"members" : [
- {"_id" : 0, "host" : nodes[0], "priority" : 1},
- {"_id" : 1, "host" : nodes[1], "priority" : 2},
+ {"_id" : 0, "host" : nodes[0], "priority" : 1},
+ {"_id" : 1, "host" : nodes[1], "priority" : 2},
{"_id" : 2, "host" : nodes[2], "priority" : 3}]});
- sleep(10000);
-
- // 2 should be master
- var m3 = replTest.nodes[2].runCommand({ismaster:1})
-
- // FAILS: node[0] is elected master, regardless of priority
- assert(m3.ismaster, 'highest priority is master');
+ // 2 should be master (give this a while to happen, as 0 will be elected, then demoted)
+ assert.soon(function() {
+ var m2 = replTest.nodes[2].getDB("admin").runCommand({ismaster:1});
+ return m2.ismaster;
+ }, 'highest priority is master', 120000);
// kill 2, 1 should take over
- var m3Id = replTest.getNodeId(nodes[2]);
- replTest.stop(m3Id);
-
- sleep(10000);
-
- var m2 = replTest.nodes[1].runCommand({ismaster:1})
- assert(m2.ismaster, 'node 2 is master');
+ replTest.stop(2);
- // bring 2 back up, nothing should happen
- replTest.start(m3Id);
+ // do some writes on 1
+ master = replTest.getMaster();
+ for (i=0; i<1000; i++) {
+ master.getDB("foo").bar.insert({i:i});
+ }
sleep(10000);
- m2 = replTest.nodes[1].runCommand({ismaster:1})
- assert(m2.ismaster, 'node 2 is still master');
-
- // kill 1, 2 should become master
- var m2Id = replTest.getNodeId(nodes[1]);
- replTest.stop(m2Id);
-
- sleep(10000);
-
- m3 = replTest.nodes[2].runCommand({ismaster:1})
- assert(m3.ismaster, 'node 3 is master');
-
- replTest.stopSet( signal );
+ for (i=0; i<1000; i++) {
+ master.getDB("bar").baz.insert({i:i});
+ }
+
+ var m1 = replTest.nodes[1].getDB("admin").runCommand({ismaster:1})
+ assert(m1.ismaster, 'node 2 is master');
+
+ // bring 2 back up, 2 should wait until caught up and then become master
+ replTest.restart(2);
+ assert.soon(function() {
+ try {
+ m2 = replTest.nodes[2].getDB("admin").runCommand({ismaster:1})
+ return m2.ismaster;
+ }
+ catch (e) {
+ print(e);
+ }
+ return false;
+ }, 'node 2 is master again');
+
+ // make sure nothing was rolled back
+ master = replTest.getMaster();
+ for (i=0; i<1000; i++) {
+ assert(master.getDB("foo").bar.findOne({i:i}) != null, 'checking '+i);
+ assert(master.getDB("bar").baz.findOne({i:i}) != null, 'checking '+i);
+ }
}
-//doTest( 15 );
+doTest( 15 );
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
new file mode 100644
index 0000000..d5e9b53
--- /dev/null
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -0,0 +1,42 @@
+// Tests whether new sharding is detected on insert by mongos
+
+var st = new ShardingTest( name = "test", shards = 1, verbose = 2, mongos = 2, other = { separateConfig : true } )
+
+var mongos = st.s
+var config = mongos.getDB("config")
+
+config.settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true )
+
+
+print( "Creating unsharded connection..." )
+
+
+var mongos2 = st._mongos[1]
+
+var coll = mongos2.getCollection( "test.foo" )
+coll.insert({ i : 0 })
+
+print( "Sharding collection..." )
+
+var admin = mongos.getDB("admin")
+
+assert.eq( coll.getShardVersion().ok, 0 )
+
+admin.runCommand({ enableSharding : "test" })
+admin.runCommand({ shardCollection : "test.foo", key : { _id : 1 } })
+
+print( "Seeing if data gets inserted unsharded..." )
+print( "No splits occur here!" )
+
+// Insert a bunch of data which should trigger a split
+for( var i = 0; i < 100; i++ ){
+ coll.insert({ i : i + 1 })
+}
+coll.getDB().getLastError()
+
+config.printShardingStatus( true )
+
+assert.eq( coll.getShardVersion().ok, 1 )
+assert.eq( 101, coll.find().itcount() )
+
+st.stop()
\ No newline at end of file
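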
diff --git a/jstests/update3.js b/jstests/update3.js
index 4dfeb90..e49f050 100644
--- a/jstests/update3.js
+++ b/jstests/update3.js
@@ -21,3 +21,9 @@ f.drop();
f.save( {'_id':0} );
f.update( {}, {$set:{'_id':5}} );
assert.eq( 0, f.findOne()._id , "D" );
+
+// Test replacement update of a field with an empty string field name.
+f.drop();
+f.save( {'':0} );
+f.update( {}, {$set:{'':'g'}} );
+assert.eq( 'g', f.findOne()[''] , "E" );
diff --git a/pch.h b/pch.h
index 1e9684d..1211e26 100644
--- a/pch.h
+++ b/pch.h
@@ -98,8 +98,6 @@
#include <boost/thread/condition.hpp>
#include <boost/thread/recursive_mutex.hpp>
#include <boost/thread/xtime.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
@@ -152,6 +150,8 @@ namespace mongo {
}
+#include "util/allocator.h"
+#include "client/redef_macros.h"
// TODO: Rework the headers so we don't need this craziness
#include "bson/inline_decls.h"
@@ -160,7 +160,7 @@ namespace mongo {
#include "util/debug_util.h"
#include "util/goodies.h"
#include "util/log.h"
-#include "util/allocator.h"
+
#include "util/assert_util.h"
namespace mongo {
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 4d73803..260274c 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 2.0.4
+Version: 2.0.5
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/config.cpp b/s/config.cpp
index 645e923..0f5aaf1 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -150,7 +150,30 @@ namespace mongo {
_save();
}
- return getChunkManager(ns,true,true);
+ ChunkManagerPtr manager = getChunkManager(ns,true,true);
+
+ // Tell the primary mongod to refresh its data
+ // TODO: Think the real fix here is for mongos to just assume all collections sharded, when we get there
+ for( int i = 0; i < 4; i++ ){
+ if( i == 3 ){
+ warning() << "too many tries updating initial version of " << ns << " on shard primary " << getPrimary() <<
+ ", other mongoses may not see the collection as sharded immediately" << endl;
+ break;
+ }
+ try {
+ ShardConnection conn( getPrimary(), ns );
+ conn.setVersion();
+ conn.done();
+ break;
+ }
+ catch( DBException& e ){
+ warning() << "could not update initial version of " << ns << " on shard primary " << getPrimary() <<
+ causedBy( e ) << endl;
+ }
+ sleepsecs( i );
+ }
+
+ return manager;
}
bool DBConfig::removeSharding( const string& ns ) {
@@ -192,14 +215,13 @@ namespace mongo {
{
scoped_lock lk( _lock );
- CollectionInfo& ci = _collections[ns];
-
- bool earlyReload = ! ci.isSharded() && ( shouldReload || forceReload );
+ bool earlyReload = ! _collections[ns].isSharded() && ( shouldReload || forceReload );
if ( earlyReload ) {
// this is to catch cases where there this is a new sharded collection
_reload();
- ci = _collections[ns];
}
+
+ CollectionInfo& ci = _collections[ns];
massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() );
assert( ! ci.key().isEmpty() );
@@ -710,42 +732,53 @@ namespace mongo {
set<string> got;
ScopedDbConnection conn( _primary, 30.0 );
- auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
- assert( c.get() );
- while ( c->more() ) {
- BSONObj o = c->next();
- string name = o["_id"].valuestrsafe();
- got.insert( name );
- if ( name == "chunksize" ) {
- LOG(1) << "MaxChunkSize: " << o["value"] << endl;
- Chunk::MaxChunkSize = o["value"].numberInt() * 1024 * 1024;
- }
- else if ( name == "balancer" ) {
- // ones we ignore here
- }
- else {
- log() << "warning: unknown setting [" << name << "]" << endl;
- }
- }
- if ( ! got.count( "chunksize" ) ) {
- conn->insert( ShardNS::settings , BSON( "_id" << "chunksize" <<
- "value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
- }
+ try {
+ auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
+ assert( c.get() );
+ while ( c->more() ) {
+
+ BSONObj o = c->next();
+ string name = o["_id"].valuestrsafe();
+ got.insert( name );
+ if ( name == "chunksize" ) {
+ int csize = o["value"].numberInt();
+
+ // validate chunksize before proceeding
+ if ( csize == 0 ) {
+ // setting was not modified; mark as such
+ got.erase(name);
+ log() << "warning: invalid chunksize (" << csize << ") ignored" << endl;
+ } else {
+ LOG(1) << "MaxChunkSize: " << csize << endl;
+ Chunk::MaxChunkSize = csize * 1024 * 1024;
+ }
+ }
+ else if ( name == "balancer" ) {
+ // ones we ignore here
+ }
+ else {
+ log() << "warning: unknown setting [" << name << "]" << endl;
+ }
+ }
- // indexes
- try {
+ if ( ! got.count( "chunksize" ) ) {
+ conn->insert( ShardNS::settings , BSON( "_id" << "chunksize" <<
+ "value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
+ }
+
+ // indexes
conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "min" << 1 ) , true );
conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "shard" << 1 << "min" << 1 ) , true );
conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "lastmod" << 1 ) , true );
conn->ensureIndex( ShardNS::shard , BSON( "host" << 1 ) , true );
+
+ conn.done();
}
- catch ( std::exception& e ) {
- log( LL_WARNING ) << "couldn't create indexes on config db: " << e.what() << endl;
+ catch ( DBException& e ) {
+ warning() << "couldn't load settings or create indexes on config db: " << e.what() << endl;
}
-
- conn.done();
}
string ConfigServer::getHost( string name , bool withPort ) {
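
The loop above is a bounded retry with a growing pause: up to three real attempts, sleepsecs(i) between them, and a warning rather than a failure when they are exhausted. Extracted as a generic sketch (the attempt callback is a hypothetical stand-in for the ShardConnection round trip):

    #include <unistd.h>

    // Try an operation a few times, pausing 0s, 1s, 2s, ... between tries.
    // Returns false when every attempt failed; the caller just warns.
    bool retryWithBackoff(bool (*attempt)(), int maxTries) {
        for (int i = 0; i < maxTries; i++) {
            if (attempt())
                return true;
            sleep(i);       // grows each round, like sleepsecs(i) above
        }
        return false;
    }
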
diff --git a/s/config_migrate.cpp b/s/config_migrate.cpp
index fff023c..7b0c5a6 100644
--- a/s/config_migrate.cpp
+++ b/s/config_migrate.cpp
@@ -37,7 +37,18 @@ namespace mongo {
if ( cur == 0 ) {
ScopedDbConnection conn( _primary );
- conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
+
+ // If the cluster has not previously been initialized, we need to set the version before using so
+ // subsequent mongoses use the config data the same way. This requires all three config servers online
+ // initially.
+ try {
+ conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
+ }
+ catch( DBException& e ){
+ error() << "All config servers must initially be reachable for the cluster to be initialized." << endl;
+ throw;
+ }
+
pool.flush();
assert( VERSION == dbConfigVersion( conn.conn() ) );
conn.done();
diff --git a/s/cursors.cpp b/s/cursors.cpp
index 12b3d5e..5957ffc 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -82,7 +82,10 @@ namespace mongo {
BufBuilder b(32768);
int num = 0;
- bool sendMore = true;
+
+ // Send more if ntoreturn is 0, or any value > 1 (one is assumed to be a single doc return, with no cursor)
+ bool sendMore = ntoreturn == 0 || ntoreturn > 1;
+ ntoreturn = abs( ntoreturn );
while ( _cursor->more() ) {
BSONObj o = _cursor->next();
@@ -99,20 +102,15 @@ namespace mongo {
break;
}
- if ( ntoreturn != 0 && ( -1 * num + _totalSent ) == ntoreturn ) {
- // hard limit - total to send
- sendMore = false;
- break;
- }
-
- if ( ntoreturn == 0 && _totalSent == 0 && num > 100 ) {
+ if ( ntoreturn == 0 && _totalSent == 0 && num >= 100 ) {
// first batch should be max 100 unless batch size specified
break;
}
}
bool hasMore = sendMore && _cursor->more();
- LOG(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl;
+ LOG(5) << "\t hasMore: " << hasMore << " sendMore: " << sendMore << " cursorMore: " << _cursor->more() << " ntoreturn: " << ntoreturn
+ << " num: " << num << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl;
replyToQuery( 0 , r.p() , r.m() , b.buf() , b.len() , num , _totalSent , hasMore ? getId() : 0 );
_totalSent += num;
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index e24a02d..731761f 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -1445,13 +1445,13 @@ namespace mongo {
bool didAnything = false;
if ( xfer["deleted"].isABSONObj() ) {
- writelock lk(ns);
- Client::Context cx(ns);
-
RemoveSaver rs( "moveChunk" , ns , "removedDuring" );
BSONObjIterator i( xfer["deleted"].Obj() );
while ( i.more() ) {
+ writelock lk(ns);
+ Client::Context cx(ns);
+
BSONObj id = i.next().Obj();
// do not apply deletes if they do not belong to the chunk being migrated
@@ -1472,11 +1472,11 @@ namespace mongo {
}
if ( xfer["reload"].isABSONObj() ) {
- writelock lk(ns);
- Client::Context cx(ns);
-
BSONObjIterator i( xfer["reload"].Obj() );
while ( i.more() ) {
+ writelock lk(ns);
+ Client::Context cx(ns);
+
BSONObj it = i.next().Obj();
Helpers::upsert( ns , it );
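
Both hunks move the writelock from outside the loop to inside it, so the lock is taken per document instead of per batch and other operations can interleave between deletes and upserts. The scoping difference, sketched with a standard mutex standing in for the database write lock:

    #include <mutex>
    #include <vector>

    std::mutex dbLock;   // stand-in for the per-namespace write lock

    void applyBatch(const std::vector<int>& docIds) {
        for (size_t i = 0; i < docIds.size(); ++i) {
            std::lock_guard<std::mutex> lk(dbLock);  // scope: one document
            // ... apply the delete/upsert for docIds[i] here ...
        }   // released between iterations, letting other work interleave
    }
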
diff --git a/s/security.cpp b/s/security.cpp
index 0b8954e..68be68a 100644
--- a/s/security.cpp
+++ b/s/security.cpp
@@ -42,13 +42,13 @@ namespace mongo {
static BSONObj userPattern = BSON("user" << 1);
- ShardConnection conn( s, systemUsers );
+ ScopedDbConnection conn( s, 30.0 );
OCCASIONALLY conn->ensureIndex(systemUsers, userPattern, false, "user_1");
{
BSONObjBuilder b;
b << "user" << user;
BSONObj query = b.done();
- userObj = conn->findOne(systemUsers, query);
+ userObj = conn->findOne(systemUsers, query, 0, QueryOption_SlaveOk);
if( userObj.isEmpty() ) {
log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
conn.done(); // return to pool
diff --git a/s/server.cpp b/s/server.cpp
index a6ffab9..bf8c215 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -290,7 +290,7 @@ int _main(int argc, char* argv[]) {
shardConnectionPool.addHook( new ShardingConnectionHook( true ) );
shardConnectionPool.setName( "mongos shardconnection connectionpool" );
-
+ // Mongos shouldn't lazily kill cursors, otherwise we can end up with extras from migration
DBClientConnection::setLazyKillCursor( false );
ReplicaSetMonitor::setConfigChangeHook( boost::bind( &ConfigServer::replicaSetChange , &configServer , _1 ) );
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index c6b30e7..c96a7e1 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -91,10 +91,10 @@ namespace mongo {
}
ShardedClientCursorPtr cc (new ShardedClientCursor( q , cursor ));
- if ( ! cc->sendNextBatch( r ) ) {
+ if ( ! cc->sendNextBatch( r, q.ntoreturn ) ) {
return;
}
- LOG(6) << "storing cursor : " << cc->getId() << endl;
+ LOG(5) << "storing cursor : " << cc->getId() << endl;
cursorCache.store( cc );
}
diff --git a/tools/top.cpp b/tools/top.cpp
index 42e4568..ff5a94b 100644
--- a/tools/top.cpp
+++ b/tools/top.cpp
@@ -1,4 +1,4 @@
-// stat.cpp
+// top.cpp
/**
* Copyright (C) 2008 10gen Inc.
@@ -91,9 +91,9 @@ namespace mongo {
cout << "\n"
<< setw(longest) << "ns"
- << "\ttotal "
- << "\tread "
- << "\twrite "
+ << "\ttotal"
+ << "\tread"
+ << "\twrite"
<< "\t\t" << terseCurrentTime()
<< endl;
for ( int i=data.size()-1; i>=0 && data.size() - i < 10 ; i-- ) {
@@ -107,6 +107,8 @@ namespace mongo {
int run() {
_sleep = getParam( "sleep" , _sleep );
+
+ auth();
BSONObj prev = getData();
diff --git a/util/allocator.h b/util/allocator.h
index a642e7c..23ee4ee 100644
--- a/util/allocator.h
+++ b/util/allocator.h
@@ -32,8 +32,6 @@ namespace mongo {
}
#define MONGO_malloc mongo::ourmalloc
-#define malloc MONGO_malloc
#define MONGO_realloc mongo::ourrealloc
-#define realloc MONGO_realloc
} // namespace mongo
diff --git a/util/assert_util.h b/util/assert_util.h
index b4c68b7..c35f651 100644
--- a/util/assert_util.h
+++ b/util/assert_util.h
@@ -171,20 +171,13 @@ namespace mongo {
/** in the mongodb source, use verify() instead of assert(). verify is always evaluated even in release builds. */
inline void verify( int msgid , bool testOK ) { if ( ! testOK ) verifyFailed( msgid ); }
-#ifdef assert
-#undef assert
-#endif
-
#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
-#define assert MONGO_assert
/* "user assert". if asserts, user did something wrong, not our code */
#define MONGO_uassert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::uasserted(msgid, msg), 0) )
-#define uassert MONGO_uassert
/* warning only - keeps going */
#define MONGO_wassert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) )
-#define wassert MONGO_wassert
/* display a message, no context, and throw assertionexception
@@ -192,7 +185,6 @@ namespace mongo {
display happening.
*/
#define MONGO_massert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::msgasserted(msgid, msg), 0) )
-#define massert MONGO_massert
/* dassert is 'debug assert' -- might want to turn off for production as these
could be slow.
@@ -202,7 +194,6 @@ namespace mongo {
#else
# define MONGO_dassert(x)
#endif
-#define dassert MONGO_dassert
// some special ids that we want to duplicate
diff --git a/util/logfile.cpp b/util/logfile.cpp
index 609edb8..65c56e2 100644
--- a/util/logfile.cpp
+++ b/util/logfile.cpp
@@ -170,21 +170,29 @@ namespace mongo {
}
void LogFile::synchronousAppend(const void *b, size_t len) {
+
+ const char *buf = static_cast<const char *>( b );
+ ssize_t charsToWrite = static_cast<ssize_t>( len );
+
+ if (charsToWrite < 0 || _fd < 0) {
+ log() << "LogFile::synchronousAppend preconditions not met.";
+ ::abort();
+ }
+
#ifdef POSIX_FADV_DONTNEED
const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
#endif
- const char *buf = (char *) b;
- assert(_fd);
- assert(((size_t)buf)%4096==0); // aligned
- if( len % 4096 != 0 ) {
- log() << len << ' ' << len % 4096 << endl;
- assert(false);
- }
- ssize_t written = write(_fd, buf, len);
- if( written != (ssize_t) len ) {
- log() << "write fails written:" << written << " len:" << len << " buf:" << buf << ' ' << errnoWithDescription() << endl;
- uasserted(13515, str::stream() << "error appending to file " << _fd << ' ' << errnoWithDescription());
+ while ( charsToWrite > 0 ) {
+ const ssize_t written = write( _fd, buf, static_cast<size_t>( charsToWrite ) );
+ if ( -1 == written ) {
+ log() << "LogFile::synchronousAppend failed with " << charsToWrite
+ << " bytes unwritten out of " << len << " bytes; b=" << b << ' '
+ << errnoWithDescription() << std::endl;
+ ::abort();
+ }
+ buf += written;
+ charsToWrite -= written;
}
if(
@@ -194,7 +202,8 @@ namespace mongo {
fsync(_fd)
#endif
) {
- uasserted(13514, str::stream() << "error appending to file on fsync " << ' ' << errnoWithDescription());
+ log() << "error appending to file on fsync " << ' ' << errnoWithDescription();
+ ::abort();
}
#ifdef POSIX_FADV_DONTNEED
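
The rewritten append loop handles short writes: POSIX write() may transfer fewer bytes than requested without that being an error, so the pointer advances and the loop continues until the buffer drains or write() returns -1. The core of it, standalone:

    #include <unistd.h>

    // Write the whole buffer, resuming after any short write.
    bool writeAll(int fd, const char* buf, ssize_t len) {
        while (len > 0) {
            ssize_t written = write(fd, buf, static_cast<size_t>(len));
            if (written == -1)
                return false;   // real error: caller logs errno and aborts
            buf += written;     // short write: pick up where it stopped
            len -= written;
        }
        return true;
    }
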
diff --git a/util/mmap_win.cpp b/util/mmap_win.cpp
index 9173d7b..71bcceb 100644
--- a/util/mmap_win.cpp
+++ b/util/mmap_win.cpp
@@ -20,6 +20,7 @@
#include "text.h"
#include "../db/mongommf.h"
#include "../db/concurrency.h"
+#include "timer.h"
namespace mongo {
@@ -166,13 +167,39 @@ namespace mongo {
scoped_lock lk(*_flushMutex);
- bool success = FlushViewOfFile(_view, 0); // 0 means whole mapping
- if (!success) {
- int err = GetLastError();
- out() << "FlushViewOfFile failed " << err << " file: " << _filename << endl;
+ int loopCount = 0;
+ bool success = false;
+ bool timeout = false;
+ int dosError = ERROR_SUCCESS;
+ const int maximumLoopCount = 1000 * 1000;
+ const int maximumTimeInSeconds = 60;
+ Timer t;
+ while ( !success && !timeout && loopCount < maximumLoopCount ) {
+ ++loopCount;
+ success = FALSE != FlushViewOfFile( _view, 0 );
+ if ( !success ) {
+ dosError = GetLastError();
+ if ( dosError != ERROR_LOCK_VIOLATION ) {
+ break;
+ }
+ timeout = t.seconds() > maximumTimeInSeconds;
+ }
+ }
+ if ( success && loopCount > 1 ) {
+ log() << "FlushViewOfFile for " << _filename
+ << " succeeded after " << loopCount
+ << " attempts taking " << t.millis()
+ << " ms" << endl;
+ }
+ else if ( !success ) {
+ log() << "FlushViewOfFile for " << _filename
+ << " failed with error " << dosError
+ << " after " << loopCount
+ << " attempts taking " << t.millis()
+ << " ms" << endl;
}
- success = FlushFileBuffers(_fd);
+ success = FALSE != FlushFileBuffers(_fd);
if (!success) {
int err = GetLastError();
out() << "FlushFileBuffers failed " << err << " file: " << _filename << endl;
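
The FlushViewOfFile loop retries only on ERROR_LOCK_VIOLATION and is bounded twice, by an attempt cap and by a wall-clock timeout. The same shape as a portable sketch (the callbacks are illustrative stand-ins for the Win32 calls):

    #include <ctime>

    bool retryBounded(bool (*flushOnce)(), bool (*lastErrorRetryable)(),
                      int maxAttempts, int maxSeconds) {
        std::time_t start = std::time(0);
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            if (flushOnce())
                return true;                     // flushed successfully
            if (!lastErrorRetryable())
                return false;                    // hard failure: stop at once
            if (std::time(0) - start > maxSeconds)
                return false;                    // bounded by wall clock too
        }
        return false;                            // bounded by attempt count
    }
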
diff --git a/util/version.cpp b/util/version.cpp
index c644e75..8b08ff4 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -38,7 +38,7 @@ namespace mongo {
* 1.2.3-rc4-pre-
* If you really need to do something else you'll need to fix _versionArray()
*/
- const char versionString[] = "2.0.4";
+ const char versionString[] = "2.0.5";
// See unit test for example outputs
static BSONArray _versionArray(const char* version){