summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--client/dbclient.h4
-rw-r--r--db/dbcommands.cpp2
-rw-r--r--db/dbhelpers.cpp4
-rw-r--r--db/geo/2d.cpp68
-rw-r--r--db/instance.cpp12
-rw-r--r--db/namespace.cpp6
-rw-r--r--db/query.cpp7
-rw-r--r--db/queryutil.cpp25
-rw-r--r--db/repl.cpp2
-rw-r--r--db/repl/connections.h25
-rw-r--r--db/repl/consensus.cpp15
-rw-r--r--db/repl/health.cpp6
-rw-r--r--db/repl/heartbeat.cpp2
-rw-r--r--db/repl/multicmd.h2
-rw-r--r--db/repl/rs_config.cpp34
-rw-r--r--db/repl/rs_member.h6
-rw-r--r--db/repl/rs_sync.cpp9
-rw-r--r--db/update.cpp8
-rw-r--r--dbtests/basictests.cpp37
-rw-r--r--debian/changelog14
-rw-r--r--debian/files1
-rw-r--r--debian/mongodb.upstart15
-rw-r--r--debian/preinst37
-rw-r--r--doxygenConfig2
-rw-r--r--jstests/or8.js12
-rw-r--r--jstests/repl/mastermaster1.js8
-rw-r--r--jstests/repl/pair3.js32
-rw-r--r--jstests/sharding/features3.js19
-rw-r--r--jstests/sharding/shard3.js4
-rw-r--r--jstests/slowNightly/sharding_migrate_cursor1.js65
-rw-r--r--jstests/updatee.js71
-rw-r--r--lib/libboost_thread-gcc41-mt-d-1_34_1.abin0 -> 692920 bytes
-rw-r--r--rpm/mongo.spec2
-rw-r--r--s/balance.cpp6
-rw-r--r--s/chunk.cpp2
-rw-r--r--s/commands_public.cpp6
-rw-r--r--s/config.cpp7
-rw-r--r--s/d_migrate.cpp35
-rw-r--r--s/grid.cpp4
-rw-r--r--shell/mongo_vstudio.cpp37
-rw-r--r--shell/servers.js18
-rw-r--r--shell/shell_utils.cpp4
-rw-r--r--shell/utils.js28
-rw-r--r--util/goodies.h43
-rw-r--r--util/message.cpp2
-rw-r--r--util/message.h4
-rw-r--r--util/ntservice.cpp5
-rw-r--r--util/sock.cpp2
-rw-r--r--util/sock.h6
-rw-r--r--util/version.cpp2
50 files changed, 616 insertions, 151 deletions
diff --git a/client/dbclient.h b/client/dbclient.h
index 5ca2b8f..9448055 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -796,7 +796,7 @@ namespace mongo {
void _checkConnection();
void checkConnection() { if( failed ) _checkConnection(); }
map< string, pair<string,string> > authCache;
- int _timeout;
+ double _timeout;
bool _connect( string& errmsg );
public:
@@ -807,7 +807,7 @@ namespace mongo {
@param timeout tcp timeout in seconds - this is for read/write, not connect.
Connect timeout is fixed, but short, at 5 seconds.
*/
- DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, int timeout=0) :
+ DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, double timeout=0) :
clientSet(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _timeout(timeout) { }
/** Connect to a Mongo database server.
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 96374d9..7bd7203 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -377,7 +377,7 @@ namespace mongo {
}
{
- BSONObjBuilder bb( result.subobjStart( "cursros" ) );
+ BSONObjBuilder bb( result.subobjStart( "cursors" ) );
ClientCursor::appendStats( bb );
bb.done();
}
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index 124deb8..205787e 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -301,6 +301,10 @@ namespace mongo {
c->checkLocation();
+ if ( yield && ! cc->yieldSometimes() ){
+ // cursor got finished by someone else, so we're done
+ break;
+ }
}
return num;
diff --git a/db/geo/2d.cpp b/db/geo/2d.cpp
index 19efafd..60818fc 100644
--- a/db/geo/2d.cpp
+++ b/db/geo/2d.cpp
@@ -1257,7 +1257,7 @@ namespace mongo {
_want._min = Point( _g , _bl );
_want._max = Point( _g , _tr );
-
+
uassert( 13064 , "need an area > 0 " , _want.area() > 0 );
_state = START;
@@ -1268,12 +1268,14 @@ namespace mongo {
GEODEBUG( "center : " << center.toString() << "\t" << _prefix );
- {
- GeoHash a(0LL,32);
- GeoHash b(0LL,32);
- b.move(1,1);
- _fudge = _g->distance(a,b);
- }
+ {
+ GeoHash a(0LL,32);
+ GeoHash b(0LL,32);
+ b.move(1,1);
+ _fudge = _g->distance(a,b);
+ }
+
+ _wantLen = _fudge + std::max((_want._max._x - _want._min._x), (_want._max._y - _want._min._y));
ok();
}
@@ -1308,32 +1310,47 @@ namespace mongo {
_state = DONE;
return;
}
-
- Box cur( _g , _prefix );
- if ( cur._min._x + _fudge < _want._min._x &&
- cur._min._y + _fudge < _want._min._y &&
- cur._max._x - _fudge > _want._max._x &&
- cur._max._y - _fudge > _want._max._y ){
-
- _state = DONE;
- GeoHash temp = _prefix.commonPrefix( cur._max.hash( _g ) );
- GEODEBUG( "box done : " << cur.toString() << " prefix:" << _prefix << " common:" << temp );
-
- if ( temp == _prefix )
- return;
- _prefix = temp;
- GEODEBUG( "\t one more loop" );
- continue;
- }
- else {
+ if (_g->sizeEdge(_prefix) < _wantLen){
_prefix = _prefix.up();
+ } else {
+ for (int i=-1; i<=1; i++){
+ for (int j=-1; j<=1; j++){
+
+ if (i == 0 && j == 0)
+ continue; // main box
+
+ GeoHash newBox = _prefix;
+ newBox.move(i, j);
+
+ PREFIXDEBUG(newBox, _g);
+
+ Box cur( _g , newBox );
+ if (_want.intersects(cur)){
+ // TODO consider splitting into quadrants
+ getPointsForPrefix(newBox);
+ } else {
+ GEODEBUG("skipping box");
+ }
+ }
+ }
+ _state = DONE;
}
+
}
return;
}
}
+
+ void getPointsForPrefix(const GeoHash& prefix){
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ){
+ return;
+ }
+
+ while ( _min.hasPrefix( prefix ) && _min.advance( -1 , _found , this ) );
+ while ( _max.hasPrefix( prefix ) && _max.advance( 1 , _found , this ) );
+ }
virtual bool checkDistance( const GeoHash& h , double& d ){
bool res = _want.inside( Point( _g , h ) , _fudge );
@@ -1346,6 +1363,7 @@ namespace mongo {
GeoHash _bl;
GeoHash _tr;
Box _want;
+ double _wantLen;
int _found;
diff --git a/db/instance.cpp b/db/instance.cpp
index 9e81464..a6873f2 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -499,6 +499,7 @@ namespace mongo {
if( ntoreturn )
ss << " ntoreturn:" << ntoreturn;
+ time_t start = 0;
int pass = 0;
bool exhaust = false;
QueryResult* msgdata;
@@ -511,6 +512,17 @@ namespace mongo {
catch ( GetMoreWaitException& ) {
exhaust = false;
massert(13073, "shutting down", !inShutdown() );
+ if( pass == 0 ) {
+ start = time(0);
+ }
+ else {
+ if( time(0) - start >= 4 ) {
+ // after about 4 seconds, return. this is a sanity check. pass stops at 1000 normally
+ // for DEV this helps and also if sleep is highly inaccurate on a platform. we want to
+ // return occasionally so slave can checkpoint.
+ pass = 10000;
+ }
+ }
pass++;
DEV
sleepmillis(20);
diff --git a/db/namespace.cpp b/db/namespace.cpp
index de3f4df..8a1ab6f 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -564,8 +564,10 @@ namespace mongo {
}
void renameNamespace( const char *from, const char *to ) {
- NamespaceIndex *ni = nsindex( from );
- assert( ni && ni->details( from ) && !ni->details( to ) );
+ NamespaceIndex *ni = nsindex( from );
+ assert( ni );
+ assert( ni->details( from ) );
+ assert( ! ni->details( to ) );
// Our namespace and index details will move to a different
// memory location. The only references to namespace and
diff --git a/db/query.cpp b/db/query.cpp
index 5bd7b00..154fd15 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -320,7 +320,6 @@ namespace mongo {
while ( 1 ) {
if ( !c->ok() ) {
-// log() << "TEMP Tailable : " << c->tailable() << ' ' << (queryOptions & QueryOption_AwaitData) << endl;
if ( c->tailable() ) {
/* when a tailable cursor hits "EOF", ok() goes false, and current() is null. however
advance() can still be retries as a reactivation attempt. when there is new data, it will
@@ -787,8 +786,10 @@ namespace mongo {
if ( _pq.isExplain()) {
_eb.noteScan( _c.get(), _nscanned, _nscannedObjects, _n, scanAndOrderRequired(), _curop.elapsedMillis(), useHints && !_pq.getHint().eoo() );
} else {
- _response.appendData( _buf.buf(), _buf.len() );
- _buf.decouple();
+ if (_buf.len()) {
+ _response.appendData( _buf.buf(), _buf.len() );
+ _buf.decouple();
+ }
}
if ( stop ) {
setStop();
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index 007a1ce..2153046 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -982,23 +982,16 @@ namespace mongo {
BSONElement kk = k.next();
int number = (int) kk.number();
bool forward = ( number >= 0 ? 1 : -1 ) * ( _direction >= 0 ? 1 : -1 ) > 0;
- BSONElement e = obj.getField( kk.fieldName() );
- if ( e.eoo() ) {
- e = staticNull.firstElement();
- }
- if ( e.type() == Array ) {
- BSONObjIterator j( e.embeddedObject() );
- bool match = false;
- while( j.more() ) {
- if ( matchesElement( j.next(), i, forward ) ) {
- match = true;
- break;
- }
- }
- if ( !match ) {
- return false;
+ BSONElementSet keys;
+ obj.getFieldsDotted( kk.fieldName(), keys );
+ bool match = false;
+ for( BSONElementSet::const_iterator j = keys.begin(); j != keys.end(); ++j ) {
+ if ( matchesElement( *j, i, forward ) ) {
+ match = true;
+ break;
}
- } else if ( !matchesElement( e, i, forward ) ) {
+ }
+ if ( !match ) {
return false;
}
}
diff --git a/db/repl.cpp b/db/repl.cpp
index 085ae64..ea0eab9 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -658,6 +658,8 @@ namespace mongo {
ReplSource tmp(c->current());
if ( tmp.hostName != cmdLine.source ) {
log() << "repl: --source " << cmdLine.source << " != " << tmp.hostName << " from local.sources collection" << endl;
+ log() << "repl: for instructions on changing this slave's source, see:" << endl;
+ log() << "http://dochub.mongodb.org/core/masterslave" << endl;
log() << "repl: terminating mongod after 30 seconds" << endl;
sleepsecs(30);
dbexit( EXIT_REPLICATION_ERROR );
diff --git a/db/repl/connections.h b/db/repl/connections.h
index 95defe4..cdf2fad 100644
--- a/db/repl/connections.h
+++ b/db/repl/connections.h
@@ -44,19 +44,36 @@ namespace mongo {
/** throws assertions if connect failure etc. */
ScopedConn(string hostport);
~ScopedConn();
- DBClientConnection* operator->();
+
+ /* If we were to run a query and not exhaust the cursor, future use of the connection would be problematic.
+ So here what we do is wrapper known safe methods and not allow cursor-style queries at all. This makes
+ ScopedConn limited in functionality but very safe. More non-cursor wrappers can be added here if needed.
+ */
+
+ bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0) {
+ return conn()->runCommand(dbname, cmd, info, options);
+ }
+ unsigned long long count(const string &ns) {
+ return conn()->count(ns);
+ }
+ BSONObj findOne(const string &ns, const Query& q, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
+ return conn()->findOne(ns, q, fieldsToReturn, queryOptions);
+ }
+
private:
auto_ptr<scoped_lock> connLock;
static mutex mapMutex;
struct X {
mutex z;
DBClientConnection cc;
- X() : z("X"), cc(/*reconnect*/true, 0, /*timeout*/10) {
+ X() : z("X"), cc(/*reconnect*/ true, 0,
+ /*timeout*/ theReplSet ? theReplSet->config().ho.heartbeatTimeoutMillis/1000.0 : 10.0) {
cc._logLevel = 2;
}
} *x;
typedef map<string,ScopedConn::X*> M;
static M& _map;
+ DBClientConnection* conn() { return &x->cc; }
};
inline ScopedConn::ScopedConn(string hostport) {
@@ -84,8 +101,8 @@ namespace mongo {
// conLock releases...
}
- inline DBClientConnection* ScopedConn::operator->() {
+ /*inline DBClientConnection* ScopedConn::operator->() {
return &x->cc;
- }
+ }*/
}
diff --git a/db/repl/consensus.cpp b/db/repl/consensus.cpp
index 4044538..1519c26 100644
--- a/db/repl/consensus.cpp
+++ b/db/repl/consensus.cpp
@@ -134,6 +134,9 @@ namespace mongo {
OID round = cmd["round"].OID();
int myver = rs.config().version;
+ const Member* primary = rs.box.getPrimary();
+ const Member* hopeful = rs.findById(whoid);
+
int vote = 0;
if( set != rs.name() ) {
log() << "replSet error received an elect request for '" << set << "' but our set name is '" << rs.name() << "'" << rsLog;
@@ -147,6 +150,16 @@ namespace mongo {
log() << "replSet info got stale version # during election" << rsLog;
vote = -10000;
}
+ else if( !hopeful ) {
+ log() << "couldn't find member with id " << whoid << rsLog;
+ vote = -10000;
+ }
+ else if( primary && primary->hbinfo().opTime > hopeful->hbinfo().opTime ) {
+ // other members might be aware of more up-to-date nodes
+ log() << hopeful->fullName() << " is trying to elect itself but " <<
+ primary->fullName() << " is already primary and more up-to-date" << rsLog;
+ vote = -10000;
+ }
else {
try {
vote = yea(whoid);
@@ -165,7 +178,7 @@ namespace mongo {
void ReplSetImpl::_getTargets(list<Target>& L, int& configVersion) {
configVersion = config().version;
for( Member *m = head(); m; m=m->next() )
- if( m->hbinfo().up() )
+ if( m->hbinfo().maybeUp() )
L.push_back( Target(m->fullName()) );
}
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index 72396fe..c75221c 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -19,6 +19,7 @@
#include "health.h"
#include "../../util/background.h"
#include "../../client/dbclient.h"
+#include "../../client/connpool.h"
#include "../commands.h"
#include "../../util/concurrency/value.h"
#include "../../util/concurrency/task.h"
@@ -186,7 +187,7 @@ namespace mongo {
//const bo fields = BSON( "o" << false << "o2" << false );
const bo fields;
- ScopedConn conn(m->fullName());
+ ScopedDbConnection conn(m->fullName());
auto_ptr<DBClientCursor> c = conn->query(rsoplog, Query().sort("$natural",1), 20, 0, &fields);
if( c.get() == 0 ) {
@@ -245,8 +246,6 @@ namespace mongo {
ss << _table();
ss << p(time_t_to_String_short(time(0)) + " current time");
- //ss << "</pre>\n";
-
if( !otEnd.isNull() ) {
ss << "<p>Log length in time: ";
unsigned d = otEnd.getSecs() - otFirst.getSecs();
@@ -259,6 +258,7 @@ namespace mongo {
ss << "</p>\n";
}
+ conn.done();
}
void ReplSetImpl::_summarizeAsHtml(stringstream& s) const {
diff --git a/db/repl/heartbeat.cpp b/db/repl/heartbeat.cpp
index 4f28897..b39fad7 100644
--- a/db/repl/heartbeat.cpp
+++ b/db/repl/heartbeat.cpp
@@ -134,7 +134,7 @@ namespace mongo {
assert( theReplSet == 0 || !theReplSet->lockedByMe() );
ScopedConn conn(memberFullName);
- return conn->runCommand("admin", cmd, result);
+ return conn.runCommand("admin", cmd, result, 0);
}
/* poll every other set member to check its status */
diff --git a/db/repl/multicmd.h b/db/repl/multicmd.h
index 61c9b5f..9eb9a17 100644
--- a/db/repl/multicmd.h
+++ b/db/repl/multicmd.h
@@ -43,7 +43,7 @@ namespace mongo {
void run() {
try {
ScopedConn c(d.toHost);
- d.ok = c->runCommand("admin", cmd, d.result);
+ d.ok = c.runCommand("admin", cmd, d.result);
}
catch(DBException&) {
DEV log() << "dev caught dbexception on multiCommand " << d.toHost << rsLog;
diff --git a/db/repl/rs_config.cpp b/db/repl/rs_config.cpp
index 85c9a46..371507d 100644
--- a/db/repl/rs_config.cpp
+++ b/db/repl/rs_config.cpp
@@ -302,9 +302,8 @@ namespace mongo {
clear();
int level = 2;
DEV level = 0;
- //log(0) << "replSet load config from: " << h.toString() << rsLog;
- auto_ptr<DBClientCursor> c;
+ BSONObj cfg;
int v = -5;
try {
if( h.isSelf() ) {
@@ -337,13 +336,28 @@ namespace mongo {
}
v = -4;
- ScopedConn conn(h.toString());
- v = -3;
- c = conn->query(rsConfigNs);
- if( c.get() == 0 ) {
- version = v; return;
+ unsigned long long count = 0;
+ try {
+ ScopedConn conn(h.toString());
+ v = -3;
+ cfg = conn.findOne(rsConfigNs, Query()).getOwned();
+ count = conn.count(rsConfigNs);
+ }
+ catch ( DBException& e) {
+ if ( !h.isSelf() ) {
+ throw;
+ }
+
+ // on startup, socket is not listening yet
+ DBDirectClient cli;
+ cfg = cli.findOne( rsConfigNs, Query() ).getOwned();
+ count = cli.count(rsConfigNs);
}
- if( !c->more() ) {
+
+ if( count > 1 )
+ uasserted(13109, str::stream() << "multiple rows in " << rsConfigNs << " not supported host: " << h.toString());
+
+ if( cfg.isEmpty() ) {
version = EMPTYCONFIG;
return;
}
@@ -355,9 +369,7 @@ namespace mongo {
return;
}
- BSONObj o = c->nextSafe();
- uassert(13109, "multiple rows in " + rsConfigNs + " not supported", !c->more());
- from(o);
+ from(cfg);
checkRsConfig();
_ok = true;
log(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
diff --git a/db/repl/rs_member.h b/db/repl/rs_member.h
index 6a797b5..099cb22 100644
--- a/db/repl/rs_member.h
+++ b/db/repl/rs_member.h
@@ -67,7 +67,6 @@ namespace mongo {
public:
HeartbeatInfo() : _id(0xffffffff),hbstate(MemberState::RS_UNKNOWN),health(-1.0),downSince(0),skew(INT_MIN) { }
HeartbeatInfo(unsigned id);
- bool up() const { return health > 0; }
unsigned id() const { return _id; }
MemberState hbstate;
double health;
@@ -78,6 +77,11 @@ namespace mongo {
OpTime opTime;
int skew;
+ bool up() const { return health > 0; }
+
+ /** health is set to -1 on startup. that means we haven't even checked yet. 0 means we checked and it failed. */
+ bool maybeUp() const { return health != 0; }
+
long long timeDown() const; // ms
/* true if changed in a way of interest to the repl set manager. */
diff --git a/db/repl/rs_sync.cpp b/db/repl/rs_sync.cpp
index 9ea65cf..9de3f60 100644
--- a/db/repl/rs_sync.cpp
+++ b/db/repl/rs_sync.cpp
@@ -70,7 +70,14 @@ namespace mongo {
return false;
}
- r.query(rsoplog, bo());
+ {
+ BSONObjBuilder q;
+ q.appendDate("$gte", applyGTE.asDate());
+ BSONObjBuilder query;
+ query.append("ts", q.done());
+ BSONObj queryObj = query.done();
+ r.query(rsoplog, queryObj);
+ }
assert( r.haveCursor() );
/* we lock outside the loop to avoid the overhead of locking on every operation. server isn't usable yet anyway! */
diff --git a/db/update.cpp b/db/update.cpp
index d4a038b..e178e0f 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -517,10 +517,12 @@ namespace mongo {
DEBUGUPDATE( "\t\t createNewFromMods root: " << root );
BSONObjIteratorSorted es( obj );
BSONElement e = es.next();
-
+
ModStateHolder::iterator m = _mods.lower_bound( root );
- ModStateHolder::iterator mend = _mods.lower_bound( root + '{' );
-
+ StringBuilder buf(root.size() + 2 );
+ buf << root << (char)255;
+ ModStateHolder::iterator mend = _mods.lower_bound( buf.str() );
+
set<string> onedownseen;
while ( e.type() && m != mend ){
diff --git a/dbtests/basictests.cpp b/dbtests/basictests.cpp
index 27f7cdc..f1e788a 100644
--- a/dbtests/basictests.cpp
+++ b/dbtests/basictests.cpp
@@ -319,6 +319,9 @@ namespace BasicTests {
class LexNumCmp {
public:
void run() {
+
+ ASSERT( ! isNumber( (char)255 ) );
+
ASSERT_EQUALS( 0, lexNumCmp( "a", "a" ) );
ASSERT_EQUALS( -1, lexNumCmp( "a", "aa" ) );
ASSERT_EQUALS( 1, lexNumCmp( "aa", "a" ) );
@@ -346,7 +349,7 @@ namespace BasicTests {
ASSERT_EQUALS( -1, lexNumCmp( "f12f", "f12g" ) );
ASSERT_EQUALS( 1, lexNumCmp( "f12g", "f12f" ) );
ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aab" ) );
- ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aa1" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa{", "aa1" ) );
ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a11" ) );
ASSERT_EQUALS( 1, lexNumCmp( "a1{a", "a1{" ) );
ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a1{a" ) );
@@ -355,6 +358,38 @@ namespace BasicTests {
ASSERT_EQUALS( -1 , lexNumCmp( "a.0" , "a.1" ) );
ASSERT_EQUALS( -1 , lexNumCmp( "a.0.b" , "a.1" ) );
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.|" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "b.0e" , (string("b.") + (char)255).c_str() ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.0e" ) );
+
+ ASSERT_EQUALS( 0, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "000238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 1, lexNumCmp( "000238947219478347782934718235", "238947219478347782934718234"));
+ ASSERT_EQUALS( -1, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "238", "000238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "00002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "000"));
+ ASSERT_EQUALS( -1, lexNumCmp( "0000", "0.0"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2380", "238"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "2384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "02384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "002384"));
+ ASSERT_EQUALS( -1, lexNumCmp( "123.234.4567", "00238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "123.234", "00123.234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "a.123.b", "a.00123.b"));
+ ASSERT_EQUALS( 1, lexNumCmp( "a.123.b", "a.b.00123.b"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a.00.0", "a.0.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "01.003.02", "1.3.2"));
+ ASSERT_EQUALS( -1, lexNumCmp( "1.3.2", "10.300.20"));
+ ASSERT_EQUALS( 0, lexNumCmp( "10.300.20", "000000000000010.0000300.000000020"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "000a", "001a"));
+ ASSERT_EQUALS( 0, lexNumCmp( "010a", "0010a"));
}
};
diff --git a/debian/changelog b/debian/changelog
index 47cf19b..c3b32b6 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,17 @@
+mongodb (1.6.5) unstable; urgency=low
+
+ * full change log http://jira.mongodb.org/browse/SERVER/fixforversion/10207
+
+ -- Richard Kreuter <richard@10gen.com> Tue, 7 Dec 2010 16:56:28 -0500
+
+mongodb (1.6.4) unstable; urgency=low
+
+ * replica_sets shell helpers
+ * sharding chunk safety, yielding during migrate cleanup
+ * full change log http://jira.mongodb.org/browse/SERVER/fixforversion/10191
+
+ -- Richard Kreuter <richard@10gen.com> Tue, 26 Oct 2010 16:56:28 -0500
+
mongodb (1.6.3) unstable; urgency=low
* replica_sets slavedelay, rollback
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..ca6f9b7
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..c2d5362
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,37 @@
+#!/bin/sh
+# preinst script for mongodb
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/doxygenConfig b/doxygenConfig
index ea7391a..9d4bbfb 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.6.3
+PROJECT_NUMBER = 1.6.6-pre-
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/or8.js b/jstests/or8.js
index 7a5c709..d08f227 100644
--- a/jstests/or8.js
+++ b/jstests/or8.js
@@ -14,3 +14,15 @@ t.find({ $or: [ { a: {$in:[]} } ] } ).toArray();
assert.eq.automsg( "2", "t.find({ $or: [ { a: {$in:[]} }, {a:1}, {a:3} ] } ).toArray().length" );
assert.eq.automsg( "2", "t.find({ $or: [ {a:1}, { a: {$in:[]} }, {a:3} ] } ).toArray().length" );
assert.eq.automsg( "2", "t.find({ $or: [ {a:1}, {a:3}, { a: {$in:[]} } ] } ).toArray().length" );
+
+// nested negate field
+
+t.drop();
+t.save( {a:{b:1,c:1}} );
+t.ensureIndex( { 'a.b':1 } );
+t.ensureIndex( { 'a.c':1 } );
+assert.eq( 1, t.find( {$or: [ { 'a.b':1 }, { 'a.c':1 } ] } ).itcount() );
+
+t.remove();
+t.save( {a:[{b:1,c:1},{b:2,c:1}]} );
+assert.eq( 1, t.find( {$or: [ { 'a.b':2 }, { 'a.c':1 } ] } ).itcount() );
diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js
index d0fcec3..9f9334b 100644
--- a/jstests/repl/mastermaster1.js
+++ b/jstests/repl/mastermaster1.js
@@ -16,9 +16,13 @@ ldb = left.getDB( "test" )
rdb = right.getDB( "test" )
ldb.foo.insert( { _id : 1 , x : "eliot" } )
-ldb.runCommand( { getlasterror : 1 , w : 2 } )
+var result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } );
+printjson(result);
rdb.foo.insert( { _id : 2 , x : "sara" } )
-rdb.runCommand( { getlasterror : 1 , w : 2 } )
+result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } )
+printjson(result);
+
+print( "check 3" )
assert.eq( 2 , ldb.foo.count() , "B1" )
assert.eq( 2 , rdb.foo.count() , "B2" )
diff --git a/jstests/repl/pair3.js b/jstests/repl/pair3.js
index d1cf99a..d5fdf7e 100644
--- a/jstests/repl/pair3.js
+++ b/jstests/repl/pair3.js
@@ -12,32 +12,40 @@ ismaster = function( n ) {
// bring up node connections before arbiter connections so that arb can forward to node when expected
connect = function() {
if ( lp == null ) {
+ print("connecting lp");
lp = startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
}
if ( rp == null ) {
+ print("connecting rp");
rp = startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
}
if ( al == null ) {
+ print("connecting al");
al = startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort );
}
if ( ar == null ) {
+ print("connecting ar");
ar = startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort );
}
}
-disconnectNode = function( mongo ) {
+disconnectNode = function( mongo ) {
if ( lp ) {
+ print("disconnecting lp: "+lpPort);
stopMongoProgram( lpPort );
lp = null;
}
if ( rp ) {
+ print("disconnecting rp: "+rpPort);
stopMongoProgram( rpPort );
rp = null;
}
if ( mongo.host.match( new RegExp( "^127.0.0.1:" + lPort + "$" ) ) ) {
+ print("disconnecting al: "+alPort);
stopMongoProgram( alPort );
al = null;
} else if ( mongo.host.match( new RegExp( "^127.0.0.1:" + rPort + "$" ) ) ) {
+ print("disconnecting ar: "+arPort);
stopMongoProgram( arPort );
ar = null;
} else {
@@ -64,47 +72,47 @@ doTest1 = function() {
pair = new ReplPair( l, r, a );
- // normal startup
+ print("normal startup");
pair.start();
pair.waitForSteadyState();
- // disconnect slave
+ print("disconnect slave");
disconnectNode( pair.slave() );
pair.waitForSteadyState( [ 1, -3 ], pair.master().host );
- // disconnect master
+ print("disconnect master");
disconnectNode( pair.master() );
pair.waitForSteadyState( [ -3, -3 ] );
- // reconnect
+ print("reconnect");
connect();
pair.waitForSteadyState();
- // disconnect master
+ print("disconnect master");
disconnectNode( pair.master() );
pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true );
- // disconnect new master
+ print("disconnect new master");
disconnectNode( pair.master() );
pair.waitForSteadyState( [ -3, -3 ] );
- // reconnect
+ print("reconnect");
connect();
pair.waitForSteadyState();
- // disconnect slave
+ print("disconnect slave");
disconnectNode( pair.slave() );
pair.waitForSteadyState( [ 1, -3 ], pair.master().host );
- // reconnect slave
+ print("reconnect slave");
connect();
pair.waitForSteadyState( [ 1, 0 ], pair.master().host );
- // disconnect master
+ print("disconnect master");
disconnectNode( pair.master() );
pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true );
- // reconnect old master
+ print("reconnect old master");
connect();
pair.waitForSteadyState( [ 1, 0 ], pair.master().host );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index a2a8197..b15ccd3 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -24,10 +24,14 @@ assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
start = new Date()
-join = startParallelShell( "db.foo.find( function(){ x = \"\"; for ( i=0; i<5000; i++ ){ x+=i; } return true; } ).itcount()" )
+print( "about to fork shell: " + Date() )
+join = startParallelShell( "db.foo.find( function(){ x = \"\"; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
+print( "after forking shell: " + Date() )
-function getMine(){
+function getMine( printInprog ){
var inprog = db.currentOp().inprog;
+ if ( printInprog )
+ printjson( inprog )
var mine = []
for ( var x=0; x<inprog.length; x++ ){
if ( inprog[x].query && inprog[x].query.$where ){
@@ -40,8 +44,8 @@ function getMine(){
state = 0; // 0 = not found, 1 = killed,
killTime = null;
-for ( i=0; i<100000; i++ ){
- var mine = getMine();
+for ( i=0; i<( 100* 1000 ); i++ ){
+ mine = getMine( state == 0 && i > 20 );
if ( state == 0 ){
if ( mine.length == 0 ){
sleep(1);
@@ -56,14 +60,19 @@ for ( i=0; i<100000; i++ ){
state = 2;
break;
}
+ sleep(1)
continue;
}
}
+print( "after loop: " + Date() );
+assert( killTime , "timed out waiting too kill last mine:" + tojson(mine) )
+
+assert.eq( 2 , state , "failed killing" );
+
killTime = (new Date()).getTime() - killTime.getTime()
print( "killTime: " + killTime );
-assert.eq( 2 , state , "failed killing" );
assert.gt( 10000 , killTime , "took too long to kill" )
join()
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 86faedc..e57dc1e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -92,8 +92,8 @@ assert.eq( 0 , secondary.count() , "s count after drop" )
primary.save( { num : 1 } );
secondary.save( { num : 4 } );
-assert.eq( 1 , primary.count() , "p count after drop adn save" )
-assert.eq( 1 , secondary.count() , "s count after drop save " )
+assert.eq( 1 , primary.count() , "p count after drop and save" )
+assert.eq( 1 , secondary.count() , "s count after drop and save " )
print("*** makes sure that sharding knows where things live" );
diff --git a/jstests/slowNightly/sharding_migrate_cursor1.js b/jstests/slowNightly/sharding_migrate_cursor1.js
new file mode 100644
index 0000000..7198102
--- /dev/null
+++ b/jstests/slowNightly/sharding_migrate_cursor1.js
@@ -0,0 +1,65 @@
+
+chunksize = 25
+
+s = new ShardingTest( "migrate_cursor1" , 2 , 1 , 1 , { chunksize : chunksize } );
+
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+
+s.adminCommand( { enablesharding : "test" } );
+db = s.getDB( "test" )
+t = db.foo
+
+bigString = ""
+stringSize = 1024;
+
+while ( bigString.length < stringSize )
+ bigString += "asdasdas";
+
+stringSize = bigString.length
+docsPerChunk = Math.ceil( ( chunksize * 1024 * 1024 ) / ( stringSize - 12 ) )
+numChunks = 5
+numDocs = 20 * docsPerChunk
+
+print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs )
+
+for ( i=0; i<numDocs; i++ ){
+ t.insert( { _id : i , s : bigString } );
+}
+
+db.getLastError();
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+assert.lt( numChunks , s.config.chunks.find().count() , "initial 1" );
+
+primary = s.getServer( "test" ).getDB( "test" ).foo;
+secondaryName = s.getOther( primary.name )
+secondary = secondaryName.getDB( "test" ).foo;
+
+assert.eq( numDocs , primary.count() , "initial 2" );
+assert.eq( 0 , secondary.count() , "initial 3" );
+assert.eq( numDocs , t.count() , "initial 4" )
+
+x = primary.find( { _id : { $lt : 500 } } ).batchSize(2)
+x.next();
+
+s.adminCommand( { moveChunk : "test.foo" , find : { _id : 0 } , to : secondaryName.name } )
+
+join = startParallelShell( "sleep(5); db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " + docsPerChunk * 3 + " } , to : '" + secondaryName.name + "' } )" )
+assert.soon( function(){ return db.x.count() > 0; } , "XXX" , 30000 , 1 )
+
+
+print( "itcount: " + x.itcount() )
+x = null;
+for ( i=0; i<5; i++ ) gc()
+
+print( "cursor should be gone" )
+
+join();
+
+//assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
+sleep( 5000 )
+assert.eq( numDocs , t.count() , "at end 2" )
+assert.eq( numDocs , primary.count() + secondary.count() , "at end 3" )
+
+s.stop()
diff --git a/jstests/updatee.js b/jstests/updatee.js
new file mode 100644
index 0000000..228eba0
--- /dev/null
+++ b/jstests/updatee.js
@@ -0,0 +1,71 @@
+// big numeric updates (used to overflow)
+
+t = db.updatee;
+t.drop();
+
+var o = { "_id" : 1,
+ "actual" : {
+ "key1" : "val1",
+ "key2" : "val2",
+ "001" : "val3",
+ "002" : "val4",
+ "0020000000000000000000" : "val5"
+ },
+ "profile-id" : "test" };
+
+
+t.insert( o );
+assert.eq( o , t.findOne() , "A1" );
+
+t.update({"profile-id" : "test"}, {$set: {"actual.0030000000000000000000": "val6"}});
+
+var q = t.findOne();
+
+// server-1347
+assert.eq(q.actual["0020000000000000000000"], "val5", "A2");
+assert.eq(q.actual["0030000000000000000000"], "val6", "A3");
+
+t.update({"profile-id" : "test"}, {$set: {"actual.02": "v4"}});
+
+q = t.findOne();
+assert.eq(q.actual["02"], "v4", "A4");
+assert(!q.actual["002"], "A5");
+
+t.update({"_id" : 1}, {$set : {"actual.2139043290148390248219423941.b" : 4}});
+q = t.findOne();
+assert.eq(q.actual["2139043290148390248219423941"].b, 4, "A6");
+
+// non-nested
+t.update({"_id" : 1}, {$set : {"7213647182934612837492342341" : 1}});
+t.update({"_id" : 1}, {$set : {"7213647182934612837492342342" : 2}});
+
+q = t.findOne();
+assert.eq(q["7213647182934612837492342341"], 1, "A7 1");
+assert.eq(q["7213647182934612837492342342"], 2, "A7 2");
+
+// 0s
+t.update({"_id" : 1}, {$set : {"actual.000" : "val000"}});
+q = t.findOne();
+assert.eq(q.actual["000"], "val000", "A8 zeros");
+
+t.update({"_id" : 1}, {$set : {"actual.00" : "val00"}});
+q = t.findOne();
+assert.eq(q.actual["00"], "val00", "A8 00");
+assert(!q.actual["000"], "A9");
+
+t.update({"_id" : 1}, {$set : {"actual.000" : "val000"}});
+q = t.findOne();
+assert.eq(q.actual["000"], "val000", "A9");
+assert(!q.actual["00"], "A10");
+
+t.update({"_id" : 1}, {$set : {"actual.01" : "val01"}});
+q = t.findOne();
+assert.eq(q.actual["000"], "val000", "A11");
+assert.eq(q.actual["01"], "val01", "A12");
+
+// shouldn't work, but shouldn't do anything too heinous, either
+t.update({"_id" : 1}, {$set : {"0.." : "val01"}});
+t.update({"_id" : 1}, {$set : {"0..0" : "val01"}});
+t.update({"_id" : 1}, {$set : {".0" : "val01"}});
+t.update({"_id" : 1}, {$set : {"..0" : "val01"}});
+t.update({"_id" : 1}, {$set : {"0.0..0" : "val01"}});
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 225639e..98f4d39 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.6.3
+Version: 1.6.5
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/balance.cpp b/s/balance.cpp
index f79e1d8..33cafdf 100644
--- a/s/balance.cpp
+++ b/s/balance.cpp
@@ -54,13 +54,13 @@ namespace mongo {
const BSONObj& chunkToMove = chunkInfo.chunk;
ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
- if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) ){
+ if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
// likely a split happened somewhere
- cm = cfg->getChunkManager( chunkInfo.ns , true );
+ cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
assert( cm );
c = cm->findChunk( chunkToMove["min"].Obj() );
- if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) ){
+ if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
log() << "chunk mismatch after reload, ignoring will retry issue cm: "
<< c->getMin() << " min: " << chunkToMove["min"].Obj() << endl;
continue;
diff --git a/s/chunk.cpp b/s/chunk.cpp
index cf1f992..87d7747 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -843,7 +843,6 @@ namespace mongo {
_chunkMap.clear();
_chunkRanges.clear();
_shards.clear();
-
// delete data from mongod
for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ){
@@ -872,7 +871,6 @@ namespace mongo {
conn.done();
}
-
log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
configServer.logChange( "dropCollection" , _ns , BSONObj() );
}
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index 91563d2..80d5cc9 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -183,7 +183,7 @@ namespace mongo {
class DBStatsCmd : public RunOnAllShardsCommand {
public:
- DBStatsCmd() : RunOnAllShardsCommand("dbstats") {}
+ DBStatsCmd() : RunOnAllShardsCommand("dbStats", "dbstats") {}
virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {
long long objects = 0;
@@ -438,7 +438,7 @@ namespace mongo {
class CollectionStats : public PublicGridCommand {
public:
- CollectionStats() : PublicGridCommand("collstats") { }
+ CollectionStats() : PublicGridCommand("collStats", "collstats") { }
bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
@@ -517,7 +517,7 @@ namespace mongo {
class FindAndModifyCmd : public PublicGridCommand {
public:
- FindAndModifyCmd() : PublicGridCommand("findandmodify") { }
+ FindAndModifyCmd() : PublicGridCommand("findAndModify", "findandmodify") { }
bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
diff --git a/s/config.cpp b/s/config.cpp
index e1016a0..1ad15d5 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -62,6 +62,7 @@ namespace mongo {
void DBConfig::CollectionInfo::shard( DBConfig * db , const string& ns , const ShardKeyPattern& key , bool unique ){
_cm.reset( new ChunkManager( db, ns , key , unique ) );
_dirty = true;
+ _dropped = false;
}
void DBConfig::CollectionInfo::unshard(){
@@ -81,10 +82,12 @@ namespace mongo {
_cm->getInfo( val );
conn->update( ShardNS::collection , key , val.obj() , true );
+ string err = conn->getLastError();
+ uassert( 13473 , (string)"failed to save collection (" + ns + "): " + err , err.size() == 0 );
+
_dirty = false;
}
-
bool DBConfig::isSharded( const string& ns ){
if ( ! _shardingEnabled )
return false;
@@ -124,7 +127,7 @@ namespace mongo {
scoped_lock lk( _lock );
CollectionInfo& ci = _collections[ns];
- uassert( 8043 , "already sharded" , ! ci.isSharded() );
+ uassert( 8043 , "collection already sharded" , ! ci.isSharded() );
log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index b8ee78e..8e9584c 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -49,9 +49,11 @@ namespace mongo {
class MoveTimingHelper {
public:
- MoveTimingHelper( const string& where , const string& ns )
+ MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max )
: _where( where ) , _ns( ns ){
_next = 1;
+ _b.append( "min" , min );
+ _b.append( "max" , max );
}
~MoveTimingHelper(){
@@ -100,9 +102,11 @@ namespace mongo {
log() << "moveChunk deleted: " << num << endl;
}
};
+
+ static const char * const cleanUpThreadName = "cleanupOldData";
void _cleanupOldData( OldDataCleanup cleanup ){
- Client::initThread( "cleanupOldData");
+ Client::initThread( cleanUpThreadName );
log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
int loops = 0;
@@ -240,6 +244,14 @@ namespace mongo {
switch ( opstr[0] ){
case 'd': {
+
+ if ( getThreadName() == cleanUpThreadName ){
+                    // do not transfer deletions performed by our own cleanup thread:
+                    // the TO shard would replay them and remove documents that
+                    // legitimately belong to the chunk being migrated
+ return;
+ }
+
// can't filter deletes :(
scoped_lock lk( _mutex );
_deleted.push_back( ide.wrap() );
@@ -267,7 +279,7 @@ namespace mongo {
}
void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ){
- static long long maxSize = 1024 * 1024;
+ const long long maxSize = 1024 * 1024;
if ( l->size() == 0 || size > maxSize )
return;
@@ -437,7 +449,7 @@ namespace mongo {
configServer.init( configdb );
}
- MoveTimingHelper timing( "from" , ns );
+ MoveTimingHelper timing( "from" , ns , min , max );
Shard fromShard( from );
Shard toShard( to );
@@ -702,13 +714,13 @@ namespace mongo {
}
void _go(){
- MoveTimingHelper timing( "to" , ns );
-
assert( active );
assert( state == READY );
assert( ! min.isEmpty() );
assert( ! max.isEmpty() );
+ MoveTimingHelper timing( "to" , ns , min , max );
+
ScopedDbConnection conn( from );
conn->getLastError(); // just test connection
@@ -841,6 +853,17 @@ namespace mongo {
BSONObjIterator i( xfer["deleted"].Obj() );
while ( i.more() ){
BSONObj id = i.next().Obj();
+
+ // do not apply deletes if they do not belong to the chunk being migrated
+ BSONObj fullObj;
+ if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
+ if ( ! isInRange( fullObj , min , max ) ) {
+ log() << "not applying out of range deletion: " << fullObj << endl;
+
+ continue;
+ }
+ }
+
Helpers::removeRange( ns , id , id, false , true , cmdLine.moveParanoia ? &rs : 0 );
didAnything = true;
}
diff --git a/s/grid.cpp b/s/grid.cpp
index e4991b2..443cd9a 100644
--- a/s/grid.cpp
+++ b/s/grid.cpp
@@ -242,8 +242,8 @@ namespace mongo {
DBConfigPtr config = getDBConfig( *it , false );
if ( config.get() != NULL ){
ostringstream ss;
- ss << "trying to add shard " << servers.toString() << " because local database " << *it;
- ss << " exists in another " << config->getPrimary().toString();
+ ss << "can't add shard " << servers.toString() << " because a local database '" << *it;
+ ss << "' exists in another " << config->getPrimary().toString();
errMsg = ss.str();
return false;
}
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index 8b23ef1..c1a224d 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -654,11 +654,13 @@ const char * jsconcatcode =
"print(\"\\trs.status() { replSetGetStatus : 1 } checks repl set status\");\n"
"print(\"\\trs.initiate() { replSetInitiate : null } initiates set with default settings\");\n"
"print(\"\\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg\");\n"
+ "print(\"\\trs.conf() get the current configuration object from local.system.replset\");\n"
+ "print(\"\\trs.reconfig(cfg) updates the configuration of a running replica set with cfg\");\n"
"print(\"\\trs.add(hostportstr) add a new member to the set with default attributes\");\n"
"print(\"\\trs.add(membercfgobj) add a new member to the set with extra attributes\");\n"
"print(\"\\trs.addArb(hostportstr) add a new member which is arbiterOnly:true\");\n"
"print(\"\\trs.stepDown() step down as primary (momentarily)\");\n"
- "print(\"\\trs.conf() return configuration from local.system.replset\");\n"
+ "print(\"\\trs.remove(hostportstr) remove a host from the replica set\");\n"
"print(\"\\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()\");\n"
"print();\n"
"print(\"\\tdb.isMaster() check who is primary\");\n"
@@ -668,6 +670,9 @@ const char * jsconcatcode =
"rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
"rs.isMaster = function () { return db.isMaster(); }\n"
"rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
+ "rs.reconfig = function(cfg) {\n"
+ "cfg.version = rs.conf().version + 1;\n"
+ "return db._adminCommand({ replSetReconfig: cfg });}\n"
"rs.add = function (hostport, arb) {\n"
"var cfg = hostport;\n"
"var local = db.getSisterDB(\"local\");\n"
@@ -687,6 +692,18 @@ const char * jsconcatcode =
"rs.stepDown = function () { return db._adminCommand({ replSetStepDown:true}); }\n"
"rs.addArb = function (hn) { return this.add(hn, true); }\n"
"rs.conf = function () { return db.getSisterDB(\"local\").system.replset.findOne(); }\n"
+ "rs.remove = function (hn) {\n"
+ "var local = db.getSisterDB(\"local\");\n"
+ "assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
+ "var c = local.system.replset.findOne();\n"
+ "assert(c, \"no config object retrievable from local.system.replset\");\n"
+ "c.version++;\n"
+ "for (var i in c.members) {\n"
+ "if (c.members[i].host == hn) {\n"
+ "c.members.splice(i, 1);\n"
+ "return db._adminCommand({ replSetReconfig : c});}}\n"
+ "return \"error: couldn't find \"+hn+\" in \"+tojson(c.members);\n"
+ "};\n"
"help = shellHelper.help = function (x) {\n"
"if (x == \"connect\") {\n"
"print(\"\\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.\");\n"
@@ -1372,7 +1389,17 @@ const char * jsconcatcode =
"var e = n.next();\n"
"if (!verbose) {\n"
"delete e.allPlans;\n"
- "delete e.oldPlan;}\n"
+ "delete e.oldPlan;\n"
+ "if (e.shards){\n"
+ "for (var key in e.shards){\n"
+ "var s = e.shards[key];\n"
+ "if(s.length === undefined){\n"
+ "delete s.allPlans;\n"
+ "delete s.oldPlan;\n"
+ "} else {\n"
+ "for (var i=0; i < s.length; i++){\n"
+ "delete s[i].allPlans;\n"
+ "delete s[i].oldPlan;}}}}}\n"
"return e;}\n"
"DBQuery.prototype.snapshot = function(){\n"
"this._ensureSpecial();\n"
@@ -1452,6 +1479,8 @@ const char * jsconcatcode =
"return __magicNoPrint;}\n"
"DBCollection.prototype.getFullName = function(){\n"
"return this._fullName;}\n"
+ "DBCollection.prototype.getMongo = function(){\n"
+ "return this._db.getMongo();}\n"
"DBCollection.prototype.getDB = function(){\n"
"return this._db;}\n"
"DBCollection.prototype._dbCommand = function( cmd , params ){\n"
@@ -1516,8 +1545,8 @@ const char * jsconcatcode =
"obj[key] = tmp[key];}}\n"
"this._mongo.insert( this._fullName , obj );\n"
"this._lastID = obj._id;}\n"
- "DBCollection.prototype.remove = function( t ){\n"
- "this._mongo.remove( this._fullName , this._massageObject( t ) );}\n"
+ "DBCollection.prototype.remove = function( t , justOne ){\n"
+ "this._mongo.remove( this._fullName , this._massageObject( t ) , justOne ? true : false );}\n"
"DBCollection.prototype.update = function( query , obj , upsert , multi ){\n"
"assert( query , \"need a query\" );\n"
"assert( obj , \"need an object\" );\n"
diff --git a/shell/servers.js b/shell/servers.js
index eb548ea..1b58c81 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -457,13 +457,15 @@ printShardingStatus = function( configDB ){
if (db.partitioned){
configDB.collections.find( { _id : new RegExp( "^" + db._id + "\." ) } ).sort( { _id : 1 } ).forEach(
function( coll ){
- output("\t\t" + coll._id + " chunks:");
- configDB.chunks.find( { "ns" : coll._id } ).sort( { min : 1 } ).forEach(
- function(chunk){
- output( "\t\t\t" + tojson( chunk.min ) + " -->> " + tojson( chunk.max ) +
- " on : " + chunk.shard + " " + tojson( chunk.lastmod ) );
- }
- );
+ if ( coll.dropped == false ){
+ output("\t\t" + coll._id + " chunks:");
+ configDB.chunks.find( { "ns" : coll._id } ).sort( { min : 1 } ).forEach(
+ function(chunk){
+ output( "\t\t\t" + tojson( chunk.min ) + " -->> " + tojson( chunk.max ) +
+ " on : " + chunk.shard + " " + tojson( chunk.lastmod ) );
+ }
+ );
+ }
}
)
}
@@ -1314,7 +1316,7 @@ ReplSetTest.prototype.awaitReplication = function() {
var entry = log.find({}).sort({'$natural': -1}).limit(1).next();
printjson( entry );
var ts = entry['ts'];
- print("TS for " + slave + " is " + ts + " and latest is " + latest);
+ print("TS for " + slave + " is " + ts.t + " and latest is " + latest.t);
print("Oplog size for " + slave + " is " + log.count());
synced = (synced && friendlyEqual(latest,ts))
}
diff --git a/shell/shell_utils.cpp b/shell/shell_utils.cpp
index 5260015..b6a67e2 100644
--- a/shell/shell_utils.cpp
+++ b/shell/shell_utils.cpp
@@ -706,8 +706,8 @@ namespace mongo {
kill_wrapper( pid, signal, port );
int i = 0;
- for( ; i < 65; ++i ) {
- if ( i == 5 ) {
+ for( ; i < 130; ++i ) {
+ if ( i == 30 ) {
char now[64];
time_t_to_String(time(0), now);
now[ 20 ] = 0;
diff --git a/shell/utils.js b/shell/utils.js
index de26403..71f3fbd 100644
--- a/shell/utils.js
+++ b/shell/utils.js
@@ -1052,11 +1052,13 @@ rs.help = function () {
print("\trs.status() { replSetGetStatus : 1 } checks repl set status");
print("\trs.initiate() { replSetInitiate : null } initiates set with default settings");
print("\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg");
+ print("\trs.conf() get the current configuration object from local.system.replset");
+ print("\trs.reconfig(cfg) updates the configuration of a running replica set with cfg");
print("\trs.add(hostportstr) add a new member to the set with default attributes");
print("\trs.add(membercfgobj) add a new member to the set with extra attributes");
print("\trs.addArb(hostportstr) add a new member which is arbiterOnly:true");
print("\trs.stepDown() step down as primary (momentarily)");
- print("\trs.conf() return configuration from local.system.replset");
+ print("\trs.remove(hostportstr) remove a host from the replica set");
print("\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()");
print();
print("\tdb.isMaster() check who is primary");
@@ -1067,6 +1069,11 @@ rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }
rs.status = function () { return db._adminCommand("replSetGetStatus"); }
rs.isMaster = function () { return db.isMaster(); }
rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }
+rs.reconfig = function(cfg) {
+ cfg.version = rs.conf().version + 1;
+
+ return db._adminCommand({ replSetReconfig: cfg });
+}
rs.add = function (hostport, arb) {
var cfg = hostport;
@@ -1074,7 +1081,9 @@ rs.add = function (hostport, arb) {
assert(local.system.replset.count() <= 1, "error: local.system.replset has unexpected contents");
var c = local.system.replset.findOne();
assert(c, "no config object retrievable from local.system.replset");
+
c.version++;
+
var max = 0;
for (var i in c.members)
if (c.members[i]._id > max) max = c.members[i]._id;
@@ -1090,6 +1099,23 @@ rs.stepDown = function () { return db._adminCommand({ replSetStepDown:true}); }
rs.addArb = function (hn) { return this.add(hn, true); }
rs.conf = function () { return db.getSisterDB("local").system.replset.findOne(); }
+rs.remove = function (hn) {
+ var local = db.getSisterDB("local");
+ assert(local.system.replset.count() <= 1, "error: local.system.replset has unexpected contents");
+ var c = local.system.replset.findOne();
+ assert(c, "no config object retrievable from local.system.replset");
+ c.version++;
+
+ for (var i in c.members) {
+ if (c.members[i].host == hn) {
+ c.members.splice(i, 1);
+ return db._adminCommand({ replSetReconfig : c});
+ }
+ }
+
+ return "error: couldn't find "+hn+" in "+tojson(c.members);
+};
+
help = shellHelper.help = function (x) {
if (x == "connect") {
print("\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.");
diff --git a/util/goodies.h b/util/goodies.h
index c43f356..7b73996 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -651,29 +651,50 @@ namespace mongo {
// for convenience, '{' is greater than anything and stops number parsing
inline int lexNumCmp( const char *s1, const char *s2 ) {
+ //cout << "START : " << s1 << "\t" << s2 << endl;
while( *s1 && *s2 ) {
- bool p1 = ( *s1 == '{' );
- bool p2 = ( *s2 == '{' );
+ bool p1 = ( *s1 == (char)255 );
+ bool p2 = ( *s2 == (char)255 );
+ //cout << "\t\t " << p1 << "\t" << p2 << endl;
if ( p1 && !p2 )
return 1;
if ( p2 && !p1 )
return -1;
-
+
bool n1 = isNumber( *s1 );
bool n2 = isNumber( *s2 );
if ( n1 && n2 ) {
- char * e1;
- char * e2;
- long l1 = strtol( s1 , &e1 , 10 );
- long l2 = strtol( s2 , &e2 , 10 );
-
- if ( l1 > l2 )
+ // get rid of leading 0s
+ while ( *s1 == '0' ) s1++;
+ while ( *s2 == '0' ) s2++;
+
+ char * e1 = (char*)s1;
+ char * e2 = (char*)s2;
+
+ // find length
+ // if end of string, will break immediately ('\0')
+ while ( isNumber (*e1) ) e1++;
+ while ( isNumber (*e2) ) e2++;
+
+ int len1 = e1-s1;
+ int len2 = e2-s2;
+
+ int result;
+ // if one is longer than the other, return
+ if ( len1 > len2 ) {
return 1;
- else if ( l1 < l2 )
+ }
+ else if ( len2 > len1 ) {
return -1;
-
+ }
+ // if the lengths are equal, just strcmp
+ else if ( (result = strncmp(s1, s2, len1)) != 0 ) {
+ return result;
+ }
+
+ // otherwise, the numbers are equal
s1 = e1;
s2 = e2;
continue;
diff --git a/util/message.cpp b/util/message.cpp
index a809c1f..cd19bd5 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -301,7 +301,7 @@ namespace mongo {
ports.insert(this);
}
- MessagingPort::MessagingPort( int timeout, int ll ) : tag(0) {
+ MessagingPort::MessagingPort( double timeout, int ll ) : tag(0) {
_logLevel = ll;
ports.insert(this);
sock = -1;
diff --git a/util/message.h b/util/message.h
index 203ad83..9651141 100644
--- a/util/message.h
+++ b/util/message.h
@@ -98,7 +98,7 @@ namespace mongo {
// in some cases the timeout will actually be 2x this value - eg we do a partial send,
// then the timeout fires, then we try to send again, then the timeout fires again with
// no data sent, then we detect that the other side is down
- MessagingPort(int timeout = 0, int logLevel = 0 );
+ MessagingPort(double timeout = 0, int logLevel = 0 );
virtual ~MessagingPort();
@@ -133,7 +133,7 @@ namespace mongo {
PiggyBackData * piggyBackData;
public:
SockAddr farEnd;
- int _timeout;
+ double _timeout;
int _logLevel; // passed to log() when logging errors
static void closeAllSockets(unsigned tagMask = 0xffffffff);
diff --git a/util/ntservice.cpp b/util/ntservice.cpp
index fe4ae44..22f83a5 100644
--- a/util/ntservice.cpp
+++ b/util/ntservice.cpp
@@ -56,8 +56,13 @@ namespace mongo {
commandLine << arg << " \"" << dbpath << "\" ";
i++;
continue;
+ } else if ( arg == "--logpath" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
} else if ( arg.length() > 9 && arg.substr(0, 9) == "--service" ) {
// Strip off --service(Name|User|Password) arguments
+ i++;
continue;
}
commandLine << arg << " ";
diff --git a/util/sock.cpp b/util/sock.cpp
index c4e1a71..3b97c4b 100644
--- a/util/sock.cpp
+++ b/util/sock.cpp
@@ -51,7 +51,7 @@ namespace mongo {
addrinfo hints;
memset(&hints, 0, sizeof(addrinfo));
hints.ai_socktype = SOCK_STREAM;
- hints.ai_flags = AI_ADDRCONFIG;
+ //hints.ai_flags = AI_ADDRCONFIG; // This is often recommended but don't do it. SERVER-1579
hints.ai_family = (IPv6Enabled() ? AF_UNSPEC : AF_INET);
stringstream ss;
diff --git a/util/sock.h b/util/sock.h
index 4b4290d..897be8a 100644
--- a/util/sock.h
+++ b/util/sock.h
@@ -115,10 +115,10 @@ namespace mongo {
return "/tmp/mongodb-" + BSONObjBuilder::numStr(port) + ".sock";
}
- inline void setSockTimeouts(int sock, int secs) {
+ inline void setSockTimeouts(int sock, double secs) {
struct timeval tv;
- tv.tv_sec = secs;
- tv.tv_usec = 0;
+ tv.tv_sec = (int)secs;
+ tv.tv_usec = (int)((long long)(secs*1000*1000) % (1000*1000));
bool report = logLevel > 3; // solaris doesn't provide these
DEV report = true;
bool ok = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv) ) == 0;
diff --git a/util/version.cpp b/util/version.cpp
index 4987e19..c5ca8d4 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -14,7 +14,7 @@ namespace mongo {
// mongo processes version support
//
- const char versionString[] = "1.6.3";
+ const char versionString[] = "1.6.6-pre-";
string mongodVersion() {
stringstream ss;