author     Antonin Kral <a.kral@bobek.cz>   2010-08-18 09:23:15 +0200
committer  Antonin Kral <a.kral@bobek.cz>   2010-08-18 09:23:15 +0200
commit     3b9581e84e19723087b31b79674801b6c06ae533 (patch)
tree       5568dbd7199b1ce64ace1cec26b590c1431e872d
parent     d7ec8115ec4b160ed74c1a8a06a1c171d76370ac (diff)
download   mongodb-3b9581e84e19723087b31b79674801b6c06ae533.tar.gz
Imported Upstream version 1.6.1
-rw-r--r--  bson/bsoninlines.h                       |  11
-rw-r--r--  bson/util/builder.h                      |  12
-rw-r--r--  client/dbclient.cpp                      |  12
-rw-r--r--  client/dbclient.h                        |  19
-rw-r--r--  client/dbclientcursor.cpp                |   2
-rw-r--r--  client/distlock.cpp                      |  32
-rw-r--r--  client/distlock.h                        |  10
-rw-r--r--  client/gridfs.cpp                        |   3
-rw-r--r--  client/syncclusterconnection.cpp         |  12
-rw-r--r--  client/syncclusterconnection.h           |   2
-rw-r--r--  db/dbcommands.cpp                        |  13
-rw-r--r--  db/dbwebserver.cpp                       |   2
-rw-r--r--  db/instance.h                            |   1
-rw-r--r--  db/namespace.cpp                         |   2
-rw-r--r--  db/repl.cpp                              |  17
-rw-r--r--  dbtests/jsobjtests.cpp                   |  30
-rw-r--r--  debian/changelog                         |   6
-rw-r--r--  debian/files                             |   1
-rw-r--r--  debian/mongodb.upstart                   |  15
-rw-r--r--  debian/preinst                           |  37
-rw-r--r--  doxygenConfig                            |   2
-rw-r--r--  jstests/dbadmin.js                       |   6
-rw-r--r--  jstests/slowNightly/sharding_rs1.js      |  13
-rw-r--r--  lib/libboost_thread-gcc41-mt-d-1_34_1.a  | bin 0 -> 692920 bytes
-rw-r--r--  rpm/mongo.spec                           |   2
-rw-r--r--  s/chunk.cpp                              |  16
-rw-r--r--  s/chunk.h                                |   4
-rw-r--r--  s/config.cpp                             |   8
-rw-r--r--  s/d_migrate.cpp                          |  22
-rw-r--r--  s/d_state.cpp                            |   3
-rw-r--r--  s/d_writeback.cpp                        |   2
-rw-r--r--  s/request.cpp                            |   3
-rw-r--r--  s/server.cpp                             |   4
-rw-r--r--  s/util.h                                 |  17
-rw-r--r--  shell/servers.js                         |   5
-rw-r--r--  util/background.cpp                      |   5
-rw-r--r--  util/log.h                               | 122
-rw-r--r--  util/message.cpp                         |   4
-rw-r--r--  util/version.cpp                         |   2
39 files changed, 343 insertions, 136 deletions
diff --git a/bson/bsoninlines.h b/bson/bsoninlines.h
index f4140a3..0a2e59b 100644
--- a/bson/bsoninlines.h
+++ b/bson/bsoninlines.h
@@ -387,16 +387,7 @@ namespace mongo {
}
break;
case NumberDouble:
- {
- stringstream tmp;
- tmp.precision( 16 );
- tmp << number();
- string n = tmp.str();
- s << n;
- // indicate this is a double:
- if( strchr(n.c_str(), '.') == 0 && strchr(n.c_str(), 'E') == 0 && strchr(n.c_str(), 'N') == 0 )
- s << ".0";
- }
+ s.appendDoubleNice( number() );
break;
case NumberLong:
s << _numberLong();
diff --git a/bson/util/builder.h b/bson/util/builder.h
index 75a1ad8..9d9eda2 100644
--- a/bson/util/builder.h
+++ b/bson/util/builder.h
@@ -164,6 +164,7 @@ namespace mongo {
#define SBNUM(val,maxSize,macro) \
int prev = _buf.l; \
int z = sprintf( _buf.grow(maxSize) , macro , (val) ); \
+ assert( z >= 0 ); \
_buf.l = prev + z; \
return *this;
@@ -197,6 +198,17 @@ namespace mongo {
}
#undef SBNUM
+ void appendDoubleNice( double x ){
+ int prev = _buf.l;
+ char * start = _buf.grow( 32 );
+ int z = sprintf( start , "%.16g" , x );
+ assert( z >= 0 );
+ _buf.l = prev + z;
+ if( strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0 ){
+ write( ".0" , 2 );
+ }
+ }
+
void write( const char* buf, int len){
memcpy( _buf.grow( len ) , buf , len );
}
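The appendDoubleNice() helper added above replaces the stringstream formatting removed from bsoninlines.h: it prints the double with "%.16g" and appends ".0" whenever the output contains no '.', 'E' or 'N' (exponent / Inf / NaN markers), so a whole-number double still reads back as a double. Below is a minimal standalone sketch of that formatting rule in plain C++, with no MongoDB types assumed.

    #include <cstdio>
    #include <cstring>
    #include <string>

    // Mirror of the ".0"-suffix rule used by appendDoubleNice (illustration only).
    std::string doubleNice( double x ){
        char buf[32];
        int z = std::snprintf( buf, sizeof(buf), "%.16g", x );  // same precision as the patch
        std::string s( buf, z > 0 ? z : 0 );
        // no decimal point, no exponent, not inf/nan -> mark it as a double
        if ( std::strchr( buf, '.' ) == 0 && std::strchr( buf, 'E' ) == 0 && std::strchr( buf, 'N' ) == 0 )
            s += ".0";
        return s;
    }

    int main(){
        std::printf( "%s\n", doubleNice( 5 ).c_str() );        // prints 5.0
        std::printf( "%s\n", doubleNice( 123.456 ).c_str() );   // prints 123.456
        return 0;
    }

This is also the behaviour the new ToStringNumber test in dbtests/jsobjtests.cpp checks: "5.0" for a plain double 5, full 16-digit output for long fractions.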
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 04b6147..26b1c26 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -1063,6 +1063,18 @@ namespace mongo {
return checkMaster()->findOne(a,b,c,d);
}
+ bool DBClientReplicaSet::isMember( const DBConnector * conn ) const {
+ if ( conn == this )
+ return true;
+
+ for ( unsigned i=0; i<_conns.size(); i++ )
+ if ( _conns[i]->isMember( conn ) )
+ return true;
+
+ return false;
+ }
+
+
bool serverAlive( const string &uri ) {
DBClientConnection c( false, 0, 20 ); // potentially the connection to server could fail while we're checking if it's alive - so use timeouts
string err;
diff --git a/client/dbclient.h b/client/dbclient.h
index 639d960..ea370c4 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -329,6 +329,8 @@ namespace mongo {
/* used by QueryOption_Exhaust. To use that your subclass must implement this. */
virtual void recv( Message& m ) { assert(false); }
+
+ virtual string getServerAddress() const = 0;
};
/**
@@ -739,8 +741,6 @@ namespace mongo {
*/
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = false , bool multi = false );
- virtual string getServerAddress() const = 0;
-
virtual bool isFailed() const = 0;
virtual void killCursor( long long cursorID ) = 0;
@@ -758,6 +758,9 @@ namespace mongo {
virtual void say( Message& toSend ) = 0;
virtual ConnectionString::ConnectionType type() const = 0;
+
+ /** @return true if conn is either equal to or contained in this connection */
+ virtual bool isMember( const DBConnector * conn ) const = 0;
}; // DBClientBase
class DBClientReplicaSet;
@@ -892,11 +895,16 @@ namespace mongo {
virtual bool call( Message &toSend, Message &response, bool assertOk = true );
virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+
+ virtual bool isMember( const DBConnector * conn ) const { return this == conn; };
+
+ virtual void checkResponse( const char *data, int nReturned );
+
protected:
friend class SyncClusterConnection;
virtual void recv( Message& m );
virtual void sayPiggyBack( Message &toSend );
- virtual void checkResponse( const char *data, int nReturned );
+
};
/** Use this class to connect to a replica set of servers. The class will manage
@@ -987,9 +995,12 @@ namespace mongo {
virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
+ virtual bool isMember( const DBConnector * conn ) const;
+
+ virtual void checkResponse( const char *data, int nReturned ) { checkMaster()->checkResponse( data , nReturned ); }
+
protected:
virtual void sayPiggyBack( Message &toSend ) { assert(false); }
- virtual void checkResponse( const char *data, int nReturned ) { assert(false); }
bool isFailed() const {
return _currentMaster == 0 || _currentMaster->isFailed();
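The isMember() method declared above turns connection identity checks into containment checks: a plain connection matches only itself, while wrapper types such as DBClientReplicaSet and SyncClusterConnection also match any connection they hold (their implementations appear later in this commit, and DBClientCursor::attach uses the check in place of pointer equality). A toy sketch of that recursive containment test, with hypothetical Conn/Pool classes standing in for the MongoDB connector types:

    #include <cassert>
    #include <vector>

    struct Conn {
        virtual ~Conn() {}
        // true if 'c' is this connection (a plain connection wraps nothing)
        virtual bool isMember( const Conn* c ) const { return c == this; }
    };

    struct Pool : Conn {
        std::vector<Conn*> conns;                    // wrapped sub-connections
        virtual bool isMember( const Conn* c ) const {
            if ( c == this )
                return true;
            for ( unsigned i = 0; i < conns.size(); i++ )
                if ( conns[i]->isMember( c ) )       // recurse into wrapped connections
                    return true;
            return false;
        }
    };

    int main(){
        Conn a, b;
        Pool p;
        p.conns.push_back( &a );
        assert( p.isMember( &a ) );    // a contained member is accepted
        assert( ! p.isMember( &b ) );  // an unrelated connection is not
        return 0;
    }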
diff --git a/client/dbclientcursor.cpp b/client/dbclientcursor.cpp
index 07771bb..5f9db43 100644
--- a/client/dbclientcursor.cpp
+++ b/client/dbclientcursor.cpp
@@ -193,7 +193,7 @@ namespace mongo {
void DBClientCursor::attach( AScopedConnection * conn ){
assert( _scopedHost.size() == 0 );
- assert( connector == conn->get() );
+ assert( conn->get()->isMember( connector ) );
_scopedHost = conn->getHost();
conn->done();
connector = 0;
diff --git a/client/distlock.cpp b/client/distlock.cpp
index c264597..245eb7e 100644
--- a/client/distlock.cpp
+++ b/client/distlock.cpp
@@ -109,9 +109,6 @@ namespace mongo {
bool DistributedLock::lock_try( string why , BSONObj * other ){
- // check for recrusive
- assert( getState() == 0 );
-
ScopedDbConnection conn( _conn );
BSONObjBuilder queryBuilder;
@@ -208,18 +205,33 @@ namespace mongo {
if ( ! gotLock )
return false;
- _state.set( 1 );
return true;
}
void DistributedLock::unlock(){
- ScopedDbConnection conn( _conn );
- conn->update( _ns , _id, BSON( "$set" << BSON( "state" << 0 ) ) );
- log(1) << "dist_lock unlock: " << conn->findOne( _ns , _id ) << endl;
- conn.done();
+ const int maxAttempts = 3;
+ int attempted = 0;
+ while ( ++attempted <= maxAttempts ) {
+
+ try {
+ ScopedDbConnection conn( _conn );
+ conn->update( _ns , _id, BSON( "$set" << BSON( "state" << 0 ) ) );
+ log(1) << "dist_lock unlock: " << conn->findOne( _ns , _id ) << endl;
+ conn.done();
+
+ return;
+
- _state.set( 0 );
- }
+ } catch ( std::exception& e) {
+ log( LL_WARNING ) << "dist_lock " << _name << " failed to contact config server in unlock attempt "
+ << attempted << ": " << e.what() << endl;
+ sleepsecs(1 << attempted);
+ }
+ }
+
+ log( LL_WARNING ) << "dist_lock couldn't consumate unlock request. " << "Lock " << _name
+ << " will be taken over after " << _takeoverMinutes << " minutes timeout" << endl;
+ }
}
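DistributedLock::unlock() above now retries the config-server update up to three times with an exponential backoff (sleepsecs(1 << attempted) waits 2s, 4s, 8s) and, if every attempt fails, falls back on the lock's takeover timeout. A minimal retry-with-backoff sketch under those assumptions; tryOnce() here is a stand-in for the real ScopedDbConnection update, not MongoDB code:

    #include <chrono>
    #include <cstdlib>
    #include <iostream>
    #include <thread>

    // Stand-in for the config-server update; fails at random for the demo.
    static bool tryOnce(){ return std::rand() % 2 == 0; }

    static bool unlockWithRetry(){
        const int maxAttempts = 3;
        for ( int attempted = 1; attempted <= maxAttempts; attempted++ ){
            if ( tryOnce() )
                return true;                                   // unlock succeeded
            std::cerr << "unlock attempt " << attempted << " failed" << std::endl;
            std::this_thread::sleep_for( std::chrono::seconds( 1 << attempted ) );  // 2s, 4s, 8s
        }
        return false;      // give up; the takeover timeout reclaims the lock later
    }

    int main(){ return unlockWithRetry() ? 0 : 1; }

Dropping the ThreadLocalValue<int> _state (removed in distlock.h below) goes with this: the lock document in the config database, not thread-local state, is now the only record of who holds the lock.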
diff --git a/client/distlock.h b/client/distlock.h
index 3a03390..8a77338 100644
--- a/client/distlock.h
+++ b/client/distlock.h
@@ -36,14 +36,6 @@ namespace mongo {
*/
DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes = 10 );
- int getState(){
- return _state.get();
- }
-
- bool isLocked(){
- return _state.get() != 0;
- }
-
bool lock_try( string why , BSONObj * other = 0 );
void unlock();
@@ -54,8 +46,6 @@ namespace mongo {
string _ns;
BSONObj _id;
-
- ThreadLocalValue<int> _state;
};
class dist_lock_try {
diff --git a/client/gridfs.cpp b/client/gridfs.cpp
index b2ae478..d740c76 100644
--- a/client/gridfs.cpp
+++ b/client/gridfs.cpp
@@ -66,7 +66,6 @@ namespace mongo {
}
BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType){
- massert( 10279 , "large files not yet implemented", length <= 0xffffffff);
char const * const end = data + length;
OID id;
@@ -127,8 +126,6 @@ namespace mongo {
if (fd != stdin)
fclose( fd );
- massert( 10280 , "large files not yet implemented", length <= 0xffffffff);
-
return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
}
diff --git a/client/syncclusterconnection.cpp b/client/syncclusterconnection.cpp
index 5324b6c..99f6067 100644
--- a/client/syncclusterconnection.cpp
+++ b/client/syncclusterconnection.cpp
@@ -369,4 +369,16 @@ namespace mongo {
// should never need to do this
assert(0);
}
+
+ bool SyncClusterConnection::isMember( const DBConnector * conn ) const {
+ if ( conn == this )
+ return true;
+
+ for ( unsigned i=0; i<_conns.size(); i++ )
+ if ( _conns[i]->isMember( conn ) )
+ return true;
+
+ return false;
+ }
+
}
diff --git a/client/syncclusterconnection.h b/client/syncclusterconnection.h
index d1115f7..4292e3d 100644
--- a/client/syncclusterconnection.h
+++ b/client/syncclusterconnection.h
@@ -90,6 +90,8 @@ namespace mongo {
virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; }
+ virtual bool isMember( const DBConnector * conn ) const;
+
private:
SyncClusterConnection( SyncClusterConnection& prev );
string _toString() const;
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 94edf0a..34d00c8 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1451,17 +1451,18 @@ namespace mongo {
BSONElementSet values;
shared_ptr<Cursor> cursor = bestGuessCursor(ns.c_str() , query , BSONObj() );
+ scoped_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
while ( cursor->ok() ){
- if ( cursor->matcher() && ! cursor->matcher()->matchesCurrent( cursor.get() ) ){
- cursor->advance();
- continue;
+ if ( !cursor->matcher() || cursor->matcher()->matchesCurrent( cursor.get() ) ){
+ BSONObj o = cursor->current();
+ o.getFieldsDotted( key, values );
}
- BSONObj o = cursor->current();
cursor->advance();
-
- o.getFieldsDotted( key.c_str(), values );
+
+ if (!cc->yieldSometimes())
+ break;
}
BSONArrayBuilder b( result.subarrayStart( "values" ) );
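The distinct command above now wraps its cursor in a ClientCursor and calls yieldSometimes() on every pass, so a long collection scan periodically gives up the read lock instead of holding it for the whole traversal. A generic sketch of that yield-every-so-often loop; the yieldSometimes() below is a trivial stand-in for illustration, not the real ClientCursor API:

    #include <cstdio>
    #include <vector>

    // Stand-in: pretend the lock was released and reacquired and the cursor is still valid.
    static bool yieldSometimes(){ return true; }

    int main(){
        std::vector<int> docs( 100000, 1 );
        long long matched = 0;
        for ( std::size_t i = 0; i < docs.size(); i++ ){
            matched += docs[i];                       // "process" the current document
            if ( ! yieldSometimes() )
                break;                                // cursor invalidated while yielding
        }
        std::printf( "%lld\n", matched );
        return 0;
    }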
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 7e45d8f..f17a283 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -432,7 +432,7 @@ namespace mongo {
ss << p( a("/", "back", "Home") );
ss << p( "<b>MongoDB List of <a href=\"http://www.mongodb.org/display/DOCS/Commands\">Commands</a></b>\n" );
const map<string, Command*> *m = Command::commandsByBestName();
- ss << "S:slave-only N:no-lock R:read-lock W:write-lock A:admin-only<br>\n";
+ ss << "S:slave-ok R:read-lock W:write-lock A:admin-only<br>\n";
ss << table();
ss << "<tr><th>Command</th><th>Attributes</th><th>Help</th></tr>\n";
for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ )
diff --git a/db/instance.h b/db/instance.h
index 1a4b855..5458fc1 100644
--- a/db/instance.h
+++ b/db/instance.h
@@ -143,6 +143,7 @@ namespace mongo {
}
virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+ virtual bool isMember( const DBConnector * conn ) const { return this == conn; };
};
extern int lockFile;
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 8be6655..de3f4df 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -637,6 +637,8 @@ namespace mongo {
}
bool legalClientSystemNS( const string& ns , bool write ){
+ if( ns == "local.system.replset" ) return true;
+
if ( ns.find( ".system.users" ) != string::npos )
return true;
diff --git a/db/repl.cpp b/db/repl.cpp
index 8cdd545..37197ba 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -858,11 +858,13 @@ namespace mongo {
see logOp() comments.
*/
void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, OpTime *localLogTail) {
- log( 6 ) << "processing op: " << op << endl;
+ if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
+ log(6) << "processing op: " << op << endl;
+
// skip no-op
- if ( op.getStringField( "op" )[ 0 ] == 'n' )
+ if( op.getStringField("op")[0] == 'n' )
return;
-
+
char clientName[MaxDatabaseLen];
const char *ns = op.getStringField("ns");
nsToDatabase(ns, clientName);
@@ -1303,12 +1305,15 @@ namespace mongo {
1) find most recent op in local log
2) more()?
*/
- if ( !oplogReader.more() ) {
+
+ bool moreInitialSyncsPending = !addDbNextPass.empty() && n; // we need "&& n" to assure we actually process at least one op to get a sync point recorded in the first place.
+
+ if ( moreInitialSyncsPending || !oplogReader.more() ) {
dblock lk;
OpTime nextLastSaved = nextLastSavedLocalTs();
{
dbtemprelease t;
- if ( oplogReader.more() ) {
+ if ( !moreInitialSyncsPending && oplogReader.more() ) {
if ( getInitialSyncCompleted() ) { // if initial sync hasn't completed, break out of loop so we can set to completed or clone more dbs
continue;
}
@@ -1325,6 +1330,8 @@ namespace mongo {
log() << "repl: end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() << endl;
break;
}
+ else {
+ }
OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
// periodically note our progress, in case we are doing a lot of work and crash
diff --git a/dbtests/jsobjtests.cpp b/dbtests/jsobjtests.cpp
index c89ef06..ea7606f 100644
--- a/dbtests/jsobjtests.cpp
+++ b/dbtests/jsobjtests.cpp
@@ -389,6 +389,35 @@ namespace JsobjTests {
}
};
+ class ToStringNumber {
+ public:
+
+ void run(){
+ BSONObjBuilder b;
+ b.append( "a" , (int)4 );
+ b.append( "b" , (double)5 );
+ b.append( "c" , (long long)6 );
+
+ b.append( "d" , 123.456789123456789123456789123456789 );
+ b.append( "e" , 123456789.123456789123456789123456789 );
+ b.append( "f" , 1234567891234567891234.56789123456789 );
+
+ b.append( "g" , -123.456 );
+
+ BSONObj x = b.obj();
+ ASSERT_EQUALS( "4", x["a"].toString( false , true ) );
+ ASSERT_EQUALS( "5.0", x["b"].toString( false , true ) );
+ ASSERT_EQUALS( "6", x["c"].toString( false , true ) );
+
+ ASSERT_EQUALS( "123.4567891234568" , x["d"].toString( false , true ) );
+ ASSERT_EQUALS( "123456789.1234568" , x["e"].toString( false , true ) );
+ // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?
+
+ ASSERT_EQUALS( "-123.456" , x["g"].toString( false , true ) );
+
+ }
+ };
+
class NullString {
public:
void run() {
@@ -1693,6 +1722,7 @@ namespace JsobjTests {
add< BSONObjTests::AppendIntOrLL >();
add< BSONObjTests::AppendNumber >();
add< BSONObjTests::ToStringArray >();
+ add< BSONObjTests::ToStringNumber >();
add< BSONObjTests::NullString >();
add< BSONObjTests::Validation::BadType >();
add< BSONObjTests::Validation::EooBeforeEnd >();
diff --git a/debian/changelog b/debian/changelog
index 3281e18..529c796 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,5 +1,11 @@
mongodb (1.6.0) unstable; urgency=low
+ * replica_sets some fixes
+ * sharding some fixes with rs
+ * full change log http://jira.mongodb.org/browse/SERVER/fixforversion/10183
+
+mongodb (1.6.0) unstable; urgency=low
+
* sharding stable
* replica_sets stable
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..ca6f9b7
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..c2d5362
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,37 @@
+#!/bin/sh
+# preinst script for mongodb
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/doxygenConfig b/doxygenConfig
index 6e9cb02..a5e3f0c 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.6.0
+PROJECT_NUMBER = 1.6.1
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/dbadmin.js b/jstests/dbadmin.js
index 8ea0426..b209b0d 100644
--- a/jstests/dbadmin.js
+++ b/jstests/dbadmin.js
@@ -21,4 +21,10 @@ assert( res.databases && res.databases.length > 0 , "listDatabases 1 " + tojson(
x = db._adminCommand( "ismaster" );
assert( x.ismaster , "ismaster failed: " + tojson( x ) )
+before = db.runCommand( "serverStatus" )
+sleep( 5000 )
+after = db.runCommand( "serverStatus" )
+assert.lt( 3 , after.uptimeEstimate , "up1" )
+assert.gt( after.uptimeEstimate , before.uptimeEstimate , "up2" )
+
// TODO: add more tests here
diff --git a/jstests/slowNightly/sharding_rs1.js b/jstests/slowNightly/sharding_rs1.js
index 3769e32..b7d90ba 100644
--- a/jstests/slowNightly/sharding_rs1.js
+++ b/jstests/slowNightly/sharding_rs1.js
@@ -15,7 +15,7 @@ while ( bigString.length < 10000 )
inserted = 0;
num = 0;
while ( inserted < ( 20 * 1024 * 1024 ) ){
- db.foo.insert( { _id : num++ , s : bigString } );
+ db.foo.insert( { _id : num++ , s : bigString , x : Math.random() } );
inserted += bigString.length;
}
@@ -48,6 +48,7 @@ assert.soon( function(){
return d < 5;
} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
for ( i=0; i<s._rs.length; i++ ){
r = s._rs[i];
@@ -58,4 +59,14 @@ for ( i=0; i<s._rs.length; i++ ){
assert.eq( x.master.md5 , x.slaves[j].md5 , "hashes same for: " + r.url + " slave: " + j );
}
+
+assert.eq( num , db.foo.find().count() , "C1" )
+assert.eq( num , db.foo.find().itcount() , "C2" )
+assert.eq( num , db.foo.find().sort( { _id : 1 } ).itcount() , "C3" )
+assert.eq( num , db.foo.find().sort( { _id : -1 } ).itcount() , "C4" )
+db.foo.ensureIndex( { x : 1 } )
+assert.eq( num , db.foo.find().sort( { x : 1 } ).itcount() , "C5" )
+assert.eq( num , db.foo.find().sort( { x : -1 } ).itcount() , "C6" )
+
+
s.stop()
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index defc29f..7044c0d 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.6.0
+Version: 1.6.1
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 5df3b69..50d4e76 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -265,7 +265,7 @@ namespace mongo {
}
// Save the new key boundaries in the configDB.
- _manager->save();
+ _manager->save( false );
// Log all these changes in the configDB's log. We log a simple split differently than a multi-split.
if ( newChunks.size() == 1) {
@@ -584,7 +584,7 @@ namespace mongo {
_shards.insert(c->getShard());
- save_inlock();
+ save_inlock( true );
log() << "no chunks for:" << ns << " so creating first: " << c->toString() << endl;
}
}
@@ -880,16 +880,18 @@ namespace mongo {
configServer.logChange( "dropCollection" , _ns , BSONObj() );
}
- void ChunkManager::save(){
+ void ChunkManager::save( bool major ){
rwlock lk( _lock , true );
- save_inlock();
+ save_inlock( major );
}
- void ChunkManager::save_inlock(){
+ void ChunkManager::save_inlock( bool major ){
ShardChunkVersion a = getVersion_inlock();
assert( a > 0 || _chunkMap.size() <= 1 );
- ShardChunkVersion nextChunkVersion = a.incMajor();
+ ShardChunkVersion nextChunkVersion = a;
+ nextChunkVersion.inc( major );
+
vector<ChunkPtr> toFix;
vector<ShardChunkVersion> newVersions;
@@ -907,7 +909,7 @@ namespace mongo {
_sequenceNumber = ++NextSequenceNumber;
ShardChunkVersion myVersion = nextChunkVersion;
- ++nextChunkVersion;
+ nextChunkVersion.incMinor();
toFix.push_back( c );
newVersions.push_back( myVersion );
diff --git a/s/chunk.h b/s/chunk.h
index 2ec43a6..b81b788 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -269,7 +269,7 @@ namespace mongo {
void getAllShards( set<Shard>& all );
void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max); // [min, max)
- void save();
+ void save( bool major );
string toString() const;
@@ -307,7 +307,7 @@ namespace mongo {
void _reload_inlock();
void _load();
- void save_inlock();
+ void save_inlock( bool major );
ShardChunkVersion getVersion_inlock() const;
void ensureIndex_inlock();
diff --git a/s/config.cpp b/s/config.cpp
index 65f56cb..e1016a0 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -653,7 +653,13 @@ namespace mongo {
BSONObj msg = BSON( "_id" << id.str() << "server" << getHostNameCached() << "time" << DATENOW <<
"what" << what << "ns" << ns << "details" << detail );
log() << "config change: " << msg << endl;
- conn->insert( "config.changelog" , msg );
+
+ try {
+ conn->insert( "config.changelog" , msg );
+ }
+ catch ( std::exception& e ){
+ log() << "not logging config change: " << e.what() << endl;
+ }
conn.done();
}
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 84d70c5..fac63af 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -528,7 +528,8 @@ namespace mongo {
log(0) << "_recvChunkStatus : " << res << endl;
- if ( ! ok ){
+ if ( ! ok || res["state"].String() == "fail" ){
+ log( LL_ERROR ) << "_recvChunkStatus error : " << res << endl;
errmsg = "_recvChunkStatus error";
result.append( "cause" ,res );
return false;
@@ -544,7 +545,7 @@ namespace mongo {
// 5.a
migrateFromStatus._inCriticalSection = true;
ShardChunkVersion myVersion = maxVersion;
- ++myVersion;
+ myVersion.incMajor();
{
dblock lk;
@@ -587,7 +588,8 @@ namespace mongo {
if ( ! x.isEmpty() ){
BSONObjBuilder temp2;
- ++myVersion;
+ myVersion.incMinor();
+
temp2.appendTimestamp( "lastmod" , myVersion );
shardingState.setVersion( ns , myVersion );
@@ -685,10 +687,12 @@ namespace mongo {
catch ( std::exception& e ){
state = FAIL;
errmsg = e.what();
+ log( LL_ERROR ) << "migrate failed: " << e.what() << endl;
}
catch ( ... ){
state = FAIL;
errmsg = "UNKNOWN ERROR";
+ log( LL_ERROR ) << "migrate failed with unknown exception" << endl;
}
active = false;
}
@@ -739,7 +743,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = conn->query( ns , Query().minKey( min ).maxKey( max ) , /* QueryOption_Exhaust */ 0 );
assert( cursor.get() );
while ( cursor->more() ){
- BSONObj o = cursor->next();
+ BSONObj o = cursor->next().getOwned();
{
writelock lk( ns );
Helpers::upsert( ns , o );
@@ -756,7 +760,11 @@ namespace mongo {
BSONObj res;
if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ){
state = FAIL;
+ errmsg = "_transferMods failed: ";
+ errmsg += res.toString();
log( LL_ERROR ) << "_transferMods failed: " << res << endl;
+ conn.done();
+ return;
}
if ( res["size"].number() == 0 )
break;
@@ -775,6 +783,7 @@ namespace mongo {
log() << "_transferMods failed in STEADY state: " << res << endl;
errmsg = res.toString();
state = FAIL;
+ conn.done();
return;
}
@@ -801,9 +810,10 @@ namespace mongo {
b.append( "from" , from );
b.append( "min" , min );
b.append( "max" , max );
-
+
b.append( "state" , stateString() );
-
+ if ( state == FAIL )
+ b.append( "errmsg" , errmsg );
{
BSONObjBuilder bb( b.subobjStart( "counts" ) );
bb.append( "cloned" , numCloned );
diff --git a/s/d_state.cpp b/s/d_state.cpp
index dd2fece..16f46cd 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -603,7 +603,8 @@ namespace mongo {
BSONObj x = loc.obj().extractFields(_key);
MyMap::const_iterator a = _map.upper_bound( x );
- a--;
+ if ( a != _map.begin() )
+ a--;
bool good = x.woCompare( a->second.first ) >= 0 && x.woCompare( a->second.second ) < 0;
#if 0
diff --git a/s/d_writeback.cpp b/s/d_writeback.cpp
index 738d4d4..a18e5d5 100644
--- a/s/d_writeback.cpp
+++ b/s/d_writeback.cpp
@@ -52,7 +52,7 @@ namespace mongo {
class WriteBackCommand : public Command {
public:
virtual LockType locktype() const { return NONE; }
- virtual bool slaveOk() const { return false; }
+ virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
WriteBackCommand() : Command( "writebacklisten" ){}
diff --git a/s/request.cpp b/s/request.cpp
index e09c3bc..ec245d7 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -123,7 +123,8 @@ namespace mongo {
log() << staleConfig.what() << " attempt: " << attempt << endl;
uassert( 10195 , "too many attempts to update config, failing" , attempt < 5 );
ShardConnection::checkMyConnectionVersions( getns() );
- sleepsecs( attempt );
+ if (!staleConfig.justConnection() )
+ sleepsecs( attempt );
reset( ! staleConfig.justConnection() );
_d.markReset();
process( attempt + 1 );
diff --git a/s/server.cpp b/s/server.cpp
index 5c6ac9b..11f688c 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -92,10 +92,12 @@ namespace mongo {
r.process();
}
catch ( DBException& e ){
+ log() << "DBException in process: " << e.what() << endl;
+
le->raiseError( e.getCode() , e.what() );
m.header()->id = r.id();
- log() << "UserException: " << e.what() << endl;
+
if ( r.expectResponse() ){
BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
replyToQuery( ResultFlag_ErrSet, p , m , err );
diff --git a/s/util.h b/s/util.h
index 63df489..8d78fe8 100644
--- a/s/util.h
+++ b/s/util.h
@@ -58,11 +58,19 @@ namespace mongo {
}
}
- ShardChunkVersion incMajor() const {
- return ShardChunkVersion( _major + 1 , 0 );
+ void inc( bool major ){
+ if ( major )
+ incMajor();
+ else
+ incMinor();
}
- void operator++(){
+ void incMajor() {
+ _major++;
+ _minor = 0;
+ }
+
+ void incMinor() {
_minor++;
}
@@ -79,8 +87,9 @@ namespace mongo {
ss << _major << "|" << _minor;
return ss.str();
}
+
operator unsigned long long() const { return _combined; }
-
+
ShardChunkVersion& operator=( const BSONElement& elem ){
switch ( elem.type() ){
case Timestamp:
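The ShardChunkVersion rework above drops the copy-returning incMajor() and operator++ in favour of in-place incMajor(), incMinor() and inc(bool major), which is what lets ChunkManager::save(bool major) choose between a major bump (resetting the minor counter) and a per-chunk minor bump. A toy version counter with the same reset-minor-on-major rule; the 64-bit packing shown is an assumption suggested by the operator unsigned long long in the hunk, not the real field layout:

    #include <cassert>
    #include <cstdio>

    // Toy major|minor chunk version (illustrative; not the real ShardChunkVersion).
    struct Version {
        unsigned major_, minor_;
        Version() : major_(0), minor_(0) {}
        void incMajor(){ major_++; minor_ = 0; }   // a major bump resets the minor counter
        void incMinor(){ minor_++; }
        void inc( bool major ){ major ? incMajor() : incMinor(); }
        unsigned long long combined() const {      // assumed packing: major high, minor low
            return ( (unsigned long long)major_ << 32 ) | minor_;
        }
    };

    int main(){
        Version v;
        v.incMajor();          // 1|0
        v.incMinor();          // 1|1
        v.inc( true );         // 2|0  (minor reset)
        assert( v.major_ == 2 && v.minor_ == 0 );
        std::printf( "%u|%u (combined=%llu)\n", v.major_, v.minor_, v.combined() );
        return 0;
    }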
diff --git a/shell/servers.js b/shell/servers.js
index dc33de6..23e52c6 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -168,6 +168,11 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var rs = new ReplSetTest( { name : testName + "-rs" + i , nodes : 3 , startPort : 31100 + ( i * 100 ) } );
this._rs[i] = { test : rs , nodes : rs.startSet( { oplogSize:40 } ) , url : rs.getURL() };
rs.initiate();
+
+ }
+
+ for ( var i=0; i<numShards; i++){
+ var rs = this._rs[i].test;
rs.getMaster().getDB( "admin" ).foo.save( { x : 1 } )
rs.awaitReplication();
this._connections.push( new Mongo( rs.getURL() ) );
diff --git a/util/background.cpp b/util/background.cpp
index a6d8290..ec5483c 100644
--- a/util/background.cpp
+++ b/util/background.cpp
@@ -65,8 +65,7 @@ namespace mongo {
}
bool BackgroundJob::wait(int msMax, unsigned maxsleep) {
- assert( state != NotStarted );
- unsigned ms = 0;
+ unsigned ms = 1;
Date_t start = jsTime();
while ( state != Done ) {
sleepmillis(ms);
@@ -84,7 +83,7 @@ namespace mongo {
/* wait for several jobs to finish. */
void BackgroundJob::wait(list<BackgroundJob*>& L, unsigned maxsleep) {
- unsigned ms = 0;
+ unsigned ms = 1;
{
x:
sleepmillis(ms);
diff --git a/util/log.h b/util/log.h
index 152fb80..1f11c81 100644
--- a/util/log.h
+++ b/util/log.h
@@ -158,15 +158,7 @@ namespace mongo {
static vector<Tee*> * globalTees;
public:
- static void logLockless( const StringData& s ){
- if ( doneSetup == 1717 ){
- fwrite( s.data() , s.size() , 1 , logfile );
- fflush( logfile );
- }
- else {
- cout << s.data() << endl;
- }
- }
+ inline static void logLockless( const StringData& s );
static void setLogFile(FILE* f){
scoped_lock lk(mutex);
@@ -177,52 +169,9 @@ namespace mongo {
return 1717;
}
- void flush(Tee *t = 0) {
- // this ensures things are sane
- if ( doneSetup == 1717 ) {
- string msg = ss.str();
- string threadName = getThreadName();
- const char * type = logLevelToString(logLevel);
-
- int spaceNeeded = msg.size() + 64 + threadName.size();
- int bufSize = 128;
- while ( bufSize < spaceNeeded )
- bufSize += 128;
-
- BufBuilder b(bufSize);
- time_t_to_String( time(0) , b.grow(20) );
- if (!threadName.empty()){
- b.appendChar( '[' );
- b.appendStr( threadName , false );
- b.appendChar( ']' );
- b.appendChar( ' ' );
- }
- if ( type[0] ){
- b.appendStr( type , false );
- b.appendStr( ": " , false );
- }
- b.appendStr( msg );
-
- string out( b.buf() , b.len() - 1);
-
- scoped_lock lk(mutex);
-
- if( t ) t->write(logLevel,out);
- if ( globalTees ){
- for ( unsigned i=0; i<globalTees->size(); i++ )
- (*globalTees)[i]->write(logLevel,out);
- }
-
-#ifndef _WIN32
- //syslog( LOG_INFO , "%s" , cc );
-#endif
- fwrite(out.data(), out.size(), 1, logfile);
- fflush(logfile);
- }
- _init();
- }
+ inline void flush(Tee *t = 0);
- Nullstream& setLogLevel(LogLevel l){
+ inline Nullstream& setLogLevel(LogLevel l){
logLevel = l;
return *this;
}
@@ -416,6 +365,69 @@ namespace mongo {
/** output the error # and error message with prefix.
handy for use as parm in uassert/massert.
*/
- string errnoWithPrefix( const char * prefix = 0 );
+ string errnoWithPrefix( const char * prefix );
+
+ void Logstream::logLockless( const StringData& s ){
+ if ( doneSetup == 1717 ){
+ if(fwrite(s.data(), s.size(), 1, logfile)){
+ fflush(logfile);
+ }else{
+ int x = errno;
+ cout << "Failed to write to logfile: " << errnoWithDescription(x) << ": " << out << endl;
+ }
+ }
+ else {
+ cout << s.data() << endl;
+ }
+ }
+
+ void Logstream::flush(Tee *t) {
+ // this ensures things are sane
+ if ( doneSetup == 1717 ) {
+ string msg = ss.str();
+ string threadName = getThreadName();
+ const char * type = logLevelToString(logLevel);
+
+ int spaceNeeded = msg.size() + 64 + threadName.size();
+ int bufSize = 128;
+ while ( bufSize < spaceNeeded )
+ bufSize += 128;
+
+ BufBuilder b(bufSize);
+ time_t_to_String( time(0) , b.grow(20) );
+ if (!threadName.empty()){
+ b.appendChar( '[' );
+ b.appendStr( threadName , false );
+ b.appendChar( ']' );
+ b.appendChar( ' ' );
+ }
+ if ( type[0] ){
+ b.appendStr( type , false );
+ b.appendStr( ": " , false );
+ }
+ b.appendStr( msg );
+
+ string out( b.buf() , b.len() - 1);
+
+ scoped_lock lk(mutex);
+
+ if( t ) t->write(logLevel,out);
+ if ( globalTees ){
+ for ( unsigned i=0; i<globalTees->size(); i++ )
+ (*globalTees)[i]->write(logLevel,out);
+ }
+
+#ifndef _WIN32
+ //syslog( LOG_INFO , "%s" , cc );
+#endif
+ if(fwrite(out.data(), out.size(), 1, logfile)){
+ fflush(logfile);
+ }else{
+ int x = errno;
+ cout << "Failed to write to logfile: " << errnoWithDescription(x) << ": " << out << endl;
+ }
+ }
+ _init();
+ }
} // namespace mongo
diff --git a/util/message.cpp b/util/message.cpp
index d7c13dc..cfff420 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -168,7 +168,11 @@ namespace mongo {
const int ret = select(maxfd+1, fds, NULL, NULL, &maxSelectTime);
if (ret == 0){
+#if defined(__linux__)
_elapsedTime += ( 10000 - maxSelectTime.tv_usec ) / 1000;
+#else
+ _elapsedTime += 10;
+#endif
continue;
}
_elapsedTime += ret; // assume 1ms to grab connection. very rough
diff --git a/util/version.cpp b/util/version.cpp
index c49c064..aacebcd 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -14,7 +14,7 @@ namespace mongo {
// mongo processes version support
//
- const char versionString[] = "1.6.0";
+ const char versionString[] = "1.6.1";
string mongodVersion() {
stringstream ss;