summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--client/dbclient.h21
-rw-r--r--db/client.cpp25
-rw-r--r--db/clientcursor.cpp102
-rw-r--r--db/clientcursor.h38
-rw-r--r--db/cmdline.cpp2
-rw-r--r--db/curop.h8
-rw-r--r--db/database.h5
-rw-r--r--db/dbcommands.cpp39
-rw-r--r--db/dbcommands_generic.cpp7
-rw-r--r--db/lasterror.cpp10
-rw-r--r--db/lasterror.h4
-rw-r--r--db/mr.cpp10
-rw-r--r--db/pdfile.cpp12
-rw-r--r--db/query.cpp6
-rw-r--r--db/repl/rs.cpp1
-rw-r--r--db/replpair.h2
-rw-r--r--db/restapi.cpp4
-rw-r--r--db/scanandorder.h11
-rw-r--r--db/update.cpp9
-rw-r--r--debian/changelog16
-rw-r--r--debian/files1
-rw-r--r--debian/mongodb.upstart15
-rw-r--r--debian/preinst37
-rw-r--r--doxygenConfig2
-rw-r--r--jstests/basic3.js2
-rw-r--r--jstests/basic9.js2
-rw-r--r--jstests/dbcase.js4
-rw-r--r--jstests/delx.js31
-rw-r--r--jstests/sharding/addshard2.js52
-rw-r--r--jstests/slowNightly/capped4.js (renamed from jstests/capped4.js)8
-rw-r--r--jstests/slowNightly/cursor8.js (renamed from jstests/cursor8.js)0
-rw-r--r--jstests/slowNightly/recstore.js (renamed from jstests/recstore.js)0
-rw-r--r--lib/libboost_thread-gcc41-mt-d-1_34_1.abin0 -> 692920 bytes
-rw-r--r--rpm/mongo.spec2
-rw-r--r--s/commands_admin.cpp31
-rw-r--r--s/commands_public.cpp1
-rw-r--r--s/grid.cpp102
-rw-r--r--s/grid.h7
-rw-r--r--s/server.cpp5
-rw-r--r--s/shard.cpp13
-rw-r--r--scripting/sm_db.cpp6
-rw-r--r--shell/servers.js22
-rw-r--r--util/version.cpp2
43 files changed, 513 insertions, 164 deletions
diff --git a/client/dbclient.h b/client/dbclient.h
index ea370c4..5ca2b8f 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -106,10 +106,11 @@ namespace mongo {
_finishInit();
}
- ConnectionString( ConnectionType type , const vector<HostAndPort>& servers )
- : _type( type ) , _servers( servers ){
- _finishInit();
- }
+ // TODO Delete if nobody is using
+ //ConnectionString( ConnectionType type , const vector<HostAndPort>& servers )
+ // : _type( type ) , _servers( servers ){
+ // _finishInit();
+ //}
ConnectionString( ConnectionType type , const string& s , const string& setName = "" ){
_type = type;
@@ -156,6 +157,14 @@ namespace mongo {
static ConnectionString parse( const string& url , string& errmsg );
+ string getSetName() const{
+ return _setName;
+ }
+
+ vector<HostAndPort> getServers() const {
+ return _servers;
+ }
+
private:
ConnectionString(){
@@ -314,7 +323,7 @@ namespace mongo {
/** Typically one uses the QUERY(...) macro to construct a Query object.
Example: QUERY( "age" << 33 << "school" << "UCLA" )
*/
-#define QUERY(x) Query( BSON(x) )
+#define QUERY(x) mongo::Query( BSON(x) )
/**
interface that handles communication with the db
@@ -1000,7 +1009,7 @@ namespace mongo {
virtual void checkResponse( const char *data, int nReturned ) { checkMaster()->checkResponse( data , nReturned ); }
protected:
- virtual void sayPiggyBack( Message &toSend ) { assert(false); }
+ virtual void sayPiggyBack( Message &toSend ) { checkMaster()->say( toSend ); }
bool isFailed() const {
return _currentMaster == 0 || _currentMaster->isFailed();
diff --git a/db/client.cpp b/db/client.cpp
index 65c467a..9781041 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -259,6 +259,29 @@ namespace mongo {
}
}
+ BSONObj CurOp::query( bool threadSafe ) {
+ if( querySize() == 1 ) {
+ return _tooBig;
+ }
+
+ if ( ! threadSafe ){
+ BSONObj o(_queryBuf);
+ return o;
+ }
+
+ int size = querySize();
+ int before = checksum( _queryBuf , size );
+ BSONObj a(_queryBuf);
+ BSONObj b = a.copy();
+ int after = checksum( _queryBuf , size );
+
+ if ( before == after )
+ return b;
+
+ return BSON( "msg" << "query changed while capturing" );
+ }
+
+
BSONObj CurOp::infoNoauth( int attempt ) {
BSONObjBuilder b;
b.append("opid", _opNum);
@@ -402,7 +425,7 @@ namespace mongo {
tablecell( ss , co.getOp() );
tablecell( ss , co.getNS() );
if ( co.haveQuery() )
- tablecell( ss , co.query() );
+ tablecell( ss , co.query( true ) );
else
tablecell( ss , "" );
tablecell( ss , co.getRemoteString() );
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index ad7f9ce..23ef529 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -32,27 +32,37 @@
namespace mongo {
+ typedef multimap<DiskLoc, ClientCursor*> CCByLoc;
+
CCById ClientCursor::clientCursorsById;
- CCByLoc ClientCursor::byLoc;
boost::recursive_mutex ClientCursor::ccmutex;
+ long long ClientCursor::numberTimedOut = 0;
- unsigned ClientCursor::byLocSize() {
+ /*static*/ void ClientCursor::assertNoCursors() {
recursive_scoped_lock lock(ccmutex);
- return byLoc.size();
+ if( clientCursorsById.size() ) {
+ log() << "ERROR clientcursors exist but should not at this point" << endl;
+ ClientCursor *cc = clientCursorsById.begin()->second;
+ log() << "first one: " << cc->cursorid << ' ' << cc->ns << endl;
+ clientCursorsById.clear();
+ assert(false);
+ }
}
+
void ClientCursor::setLastLoc_inlock(DiskLoc L) {
if ( L == _lastLoc )
return;
+ CCByLoc& bl = byLoc();
if ( !_lastLoc.isNull() ) {
- CCByLoc::iterator i = kv_find(byLoc, _lastLoc, this);
- if ( i != byLoc.end() )
- byLoc.erase(i);
+ CCByLoc::iterator i = kv_find(bl, _lastLoc, this);
+ if ( i != bl.end() )
+ bl.erase(i);
}
if ( !L.isNull() )
- byLoc.insert( make_pair(L, this) );
+ bl.insert( make_pair(L, this) );
_lastLoc = L;
}
@@ -76,24 +86,52 @@ namespace mongo {
{
recursive_scoped_lock lock(ccmutex);
- for ( CCByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ++i ) {
+ Database *db = cc().database();
+ assert(db);
+ assert( str::startsWith(nsPrefix, db->name) );
+
+ for( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
ClientCursor *cc = i->second;
- if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 )
+ if( cc->_db != db )
+ continue;
+ if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 ) {
toDelete.push_back(i->second);
+ }
}
+ /*
+ note : we can't iterate byloc because clientcursors may exist with a loc of null in which case
+ they are not in the map. perhaps they should not exist though in the future? something to
+ change???
+
+ CCByLoc& bl = db->ccByLoc;
+ for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); ++i ) {
+ ClientCursor *cc = i->second;
+ if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 ) {
+ assert( cc->_db == db );
+ toDelete.push_back(i->second);
+ }
+ }*/
+
for ( vector<ClientCursor*>::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
delete (*i);
}
}
+ bool ClientCursor::shouldTimeout( unsigned millis ){
+ _idleAgeMillis += millis;
+ return _idleAgeMillis > 600000 && _pinValue == 0;
+ }
+
/* called every 4 seconds. millis is amount of idle time passed since the last call -- could be zero */
void ClientCursor::idleTimeReport(unsigned millis) {
+ readlock lk("");
recursive_scoped_lock lock(ccmutex);
- for ( CCByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ) {
- CCByLoc::iterator j = i;
+ for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ) {
+ CCById::iterator j = i;
i++;
if( j->second->shouldTimeout( millis ) ){
+ numberTimedOut++;
log(1) << "killing old cursor " << j->second->cursorid << ' ' << j->second->ns
<< " idle:" << j->second->idleTime() << "ms\n";
delete j->second;
@@ -106,10 +144,12 @@ namespace mongo {
*/
void ClientCursor::informAboutToDeleteBucket(const DiskLoc& b) {
recursive_scoped_lock lock(ccmutex);
- RARELY if ( byLoc.size() > 70 ) {
- log() << "perf warning: byLoc.size=" << byLoc.size() << " in aboutToDeleteBucket\n";
+ Database *db = cc().database();
+ CCByLoc& bl = db->ccByLoc;
+ RARELY if ( bl.size() > 70 ) {
+ log() << "perf warning: byLoc.size=" << bl.size() << " in aboutToDeleteBucket\n";
}
- for ( CCByLoc::iterator i = byLoc.begin(); i != byLoc.end(); i++ )
+ for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); i++ )
i->second->c->aboutToDeleteBucket(b);
}
void aboutToDeleteBucket(const DiskLoc& b) {
@@ -120,8 +160,11 @@ namespace mongo {
void ClientCursor::aboutToDelete(const DiskLoc& dl) {
recursive_scoped_lock lock(ccmutex);
- CCByLoc::iterator j = byLoc.lower_bound(dl);
- CCByLoc::iterator stop = byLoc.upper_bound(dl);
+ Database *db = cc().database();
+ assert(db);
+ CCByLoc& bl = db->ccByLoc;
+ CCByLoc::iterator j = bl.lower_bound(dl);
+ CCByLoc::iterator stop = bl.upper_bound(dl);
if ( j == stop )
return;
@@ -139,6 +182,7 @@ namespace mongo {
for ( vector<ClientCursor*>::iterator i = toAdvance.begin(); i != toAdvance.end(); ++i ){
ClientCursor* cc = *i;
+ wassert(cc->_db == db);
if ( cc->_doingDeletes ) continue;
@@ -157,7 +201,9 @@ namespace mongo {
c->advance();
if ( c->eof() ) {
// advanced to end
- // leave ClieneCursor in place so next getMore doesn't fail
+ // leave ClientCursor in place so next getMore doesn't fail
+ // still need to mark new location though
+ cc->updateLocation();
}
else {
wassert( c->refLoc() != dl );
@@ -296,6 +342,13 @@ namespace mongo {
int ctmLast = 0; // so we don't have to do find() which is a little slow very often.
long long ClientCursor::allocCursorId_inlock() {
+ if( 0 ) {
+ static long long z;
+ ++z;
+ cout << "TEMP alloccursorid " << z << endl;
+ return z;
+ }
+
long long x;
int ctm = (int) curTimeMillis();
while ( 1 ) {
@@ -328,7 +381,13 @@ namespace mongo {
}
-
+ void ClientCursor::appendStats( BSONObjBuilder& result ){
+ recursive_scoped_lock lock(ccmutex);
+ result.appendNumber("totalOpen", (int)clientCursorsById.size() );
+ result.appendNumber("clientCursors_size", (int) numCursors());
+ result.appendNumber("timedOut" , (int)numberTimedOut);
+ }
+
// QUESTION: Restrict to the namespace from which this command was issued?
// Alternatively, make this command admin-only?
class CmdCursorInfo : public Command {
@@ -339,11 +398,8 @@ namespace mongo {
help << " example: { cursorInfo : 1 }";
}
virtual LockType locktype() const { return NONE; }
- bool run(const string&, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
- recursive_scoped_lock lock(ClientCursor::ccmutex);
- result.append("totalOpen", unsigned( ClientCursor::clientCursorsById.size() ) );
- result.append("byLocation_size", unsigned( ClientCursor::byLoc.size() ) );
- result.append("clientCursors_size", unsigned( ClientCursor::clientCursorsById.size() ) );
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ ClientCursor::appendStats( result );
return true;
}
} cmdCursorInfo;
diff --git a/db/clientcursor.h b/db/clientcursor.h
index 32453fd..6f79dcf 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -45,8 +45,6 @@ namespace mongo {
*/
typedef map<CursorId, ClientCursor*> CCById;
- typedef multimap<DiskLoc, ClientCursor*> CCByLoc;
-
extern BSONObj id_obj;
class ClientCursor {
@@ -64,14 +62,13 @@ namespace mongo {
ElapsedTracker _yieldSometimesTracker;
static CCById clientCursorsById;
- static CCByLoc byLoc;
- static boost::recursive_mutex ccmutex; // must use this for all statics above!
-
- static CursorId allocCursorId_inlock();
-
-
+ static long long numberTimedOut;
+ static boost::recursive_mutex ccmutex; // must use this for all statics above!
+ static CursorId allocCursorId_inlock();
public:
+ static void assertNoCursors();
+
/* use this to assure we don't in the background time out cursor while it is under use.
if you are using noTimeout() already, there is no risk anyway.
Further, this mechanism guards against two getMore requests on the same cursor executing
@@ -139,19 +136,23 @@ namespace mongo {
};
/*const*/ CursorId cursorid;
- string ns;
- shared_ptr<Cursor> c;
+ const string ns;
+ const shared_ptr<Cursor> c;
int pos; // # objects into the cursor so far
BSONObj query;
- int _queryOptions; // see enum QueryOptions dbclient.h
+ const int _queryOptions; // see enum QueryOptions dbclient.h
OpTime _slaveReadTill;
+ Database * const _db;
ClientCursor(int queryOptions, shared_ptr<Cursor>& _c, const string& _ns) :
_idleAgeMillis(0), _pinValue(0),
_doingDeletes(false), _yieldSometimesTracker(128,10),
ns(_ns), c(_c),
- pos(0), _queryOptions(queryOptions)
+ pos(0), _queryOptions(queryOptions),
+ _db( cc().database() )
{
+ assert( _db );
+ assert( str::startsWith(_ns, _db->name) );
if( queryOptions & QueryOption_NoCursorTimeout )
noTimeout();
recursive_scoped_lock lock(ccmutex);
@@ -308,10 +309,7 @@ namespace mongo {
/**
* @param millis amount of idle passed time since last call
*/
- bool shouldTimeout( unsigned millis ){
- _idleAgeMillis += millis;
- return _idleAgeMillis > 600000 && _pinValue == 0;
- }
+ bool shouldTimeout( unsigned millis );
void storeOpForSlave( DiskLoc last );
void updateSlaveLocation( CurOp& curop );
@@ -327,12 +325,18 @@ private:
void noTimeout() {
_pinValue++;
}
+
+ multimap<DiskLoc, ClientCursor*>& byLoc() {
+ return _db->ccByLoc;
+ }
public:
void setDoingDeletes( bool doingDeletes ){
_doingDeletes = doingDeletes;
}
+
+ static void appendStats( BSONObjBuilder& result );
- static unsigned byLocSize(); // just for diagnostics
+ static unsigned numCursors() { return clientCursorsById.size(); }
static void informAboutToDeleteBucket(const DiskLoc& b);
static void aboutToDelete(const DiskLoc& dl);
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index d79cb87..65ee179 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -45,7 +45,7 @@ namespace mongo {
("bind_ip", po::value<string>(&cmdLine.bind_ip), "comma separated list of ip addresses to listen on - all local ips by default")
("logpath", po::value<string>() , "file to send all output to instead of stdout" )
("logappend" , "append to logpath instead of over-writing" )
- ("pidfilepath", po::value<string>(), "directory for pidfile (if not set, no pidfile is created)")
+ ("pidfilepath", po::value<string>(), "full path to pidfile (if not set, no pidfile is created)")
#ifndef _WIN32
("fork" , "fork server process" )
#endif
diff --git a/db/curop.h b/db/curop.h
index 81fa0e4..fbeda9f 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -85,13 +85,7 @@ namespace mongo {
int querySize() const { return *((int *) _queryBuf); }
bool haveQuery() const { return querySize() != 0; }
- BSONObj query() {
- if( querySize() == 1 ) {
- return _tooBig;
- }
- BSONObj o(_queryBuf);
- return o;
- }
+ BSONObj query( bool threadSafe = false);
void ensureStarted(){
if ( _start == 0 )
diff --git a/db/database.h b/db/database.h
index ff0e814..c7d72c5 100644
--- a/db/database.h
+++ b/db/database.h
@@ -22,6 +22,8 @@
namespace mongo {
+ class ClientCursor;
+
/**
* Database represents a database database
* Each database database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
@@ -196,6 +198,9 @@ namespace mongo {
NamespaceIndex namespaceIndex;
int profile; // 0=off.
string profileName; // "alleyinsider.system.profile"
+
+ multimap<DiskLoc, ClientCursor*> ccByLoc;
+
int magic; // used for making sure the object is still loaded in memory
};
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 34d00c8..22b0457 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -160,32 +160,6 @@ namespace mongo {
}
} cmdGetPrevError;
- class CmdSwitchToClientErrors : public Command {
- public:
- virtual bool requiresAuth() { return false; }
- virtual bool logTheOp() {
- return false;
- }
- virtual void help( stringstream& help ) const {
- help << "convert to id based errors rather than connection based";
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual LockType locktype() const { return NONE; }
- CmdSwitchToClientErrors() : Command("switchToClientErrors", false, "switchtoclienterrors") {}
- bool run(const string& dbnamne , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if ( lastError.getID() ){
- errmsg = "already in client id mode";
- return false;
- }
- LastError *le = lastError.disableForCommand();
- le->overridenById = true;
- result << "ok" << 1;
- return true;
- }
- } cmdSwitchToClientErrors;
-
class CmdDropDatabase : public Command {
public:
virtual bool logTheOp() {
@@ -293,7 +267,6 @@ namespace mongo {
}
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
-
long long start = Listener::getElapsedTimeMillis();
BSONObjBuilder timeBuilder(128);
@@ -382,7 +355,13 @@ namespace mongo {
globalFlushCounters.append( bb );
bb.done();
}
-
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "cursors" ) );
+ ClientCursor::appendStats( bb );
+ bb.done();
+ }
+
timeBuilder.appendNumber( "after counters" , Listener::getElapsedTimeMillis() - start );
if ( anyReplEnabled() ){
@@ -649,10 +628,10 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "create a collection";
}
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
string err;
- bool ok = userCreateNS(ns.c_str(), cmdObj, err, true);
+ bool ok = userCreateNS(ns.c_str(), cmdObj, err, ! fromRepl );
if ( !ok && !err.empty() )
errmsg = err;
return ok;
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index 6274394..340f31c 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -60,6 +60,13 @@ namespace mongo {
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
result << "version" << versionString << "gitVersion" << gitVersion() << "sysInfo" << sysInfo();
result << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
+ result.appendBool( "debug" ,
+#ifdef _DEBUG
+ true
+#else
+ false
+#endif
+ );
return true;
}
} cmdBuildInfo;
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 9fc5512..630fcfb 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -168,9 +168,7 @@ namespace mongo {
}
LastError * LastErrorHolder::startRequest( Message& m , int clientId ) {
-
- if ( clientId == 0 )
- clientId = m.header()->id & 0xFFFF0000;
+ assert( clientId );
setID( clientId );
LastError * le = _get( true );
@@ -179,11 +177,7 @@ namespace mongo {
}
void LastErrorHolder::startRequest( Message& m , LastError * connectionOwned ) {
- if ( !connectionOwned->overridenById ) {
- prepareErrForNewRequest( m, connectionOwned );
- return;
- }
- startRequest(m);
+ prepareErrForNewRequest( m, connectionOwned );
}
void LastErrorHolder::disconnect( int clientId ){
diff --git a/db/lasterror.h b/db/lasterror.h
index 5900208..2006f1c 100644
--- a/db/lasterror.h
+++ b/db/lasterror.h
@@ -32,7 +32,6 @@ namespace mongo {
long long nObjects;
int nPrev;
bool valid;
- bool overridenById;
bool disabled;
void writeback( OID& oid ){
reset( true );
@@ -56,7 +55,6 @@ namespace mongo {
nObjects = nDeleted;
}
LastError() {
- overridenById = false;
reset();
}
void reset( bool _valid = false ) {
@@ -127,7 +125,7 @@ namespace mongo {
/** when db receives a message/request, call this */
void startRequest( Message& m , LastError * connectionOwned );
- LastError * startRequest( Message& m , int clientId = 0 );
+ LastError * startRequest( Message& m , int clientId );
void disconnect( int clientId );
diff --git a/db/mr.cpp b/db/mr.cpp
index 8fa8d50..7786c85 100644
--- a/db/mr.cpp
+++ b/db/mr.cpp
@@ -176,11 +176,11 @@ namespace mongo {
{ // query options
if ( cmdObj["query"].type() == Object ){
filter = cmdObj["query"].embeddedObjectUserCheck();
- q = filter;
}
- if ( cmdObj["sort"].type() == Object )
- q.sort( cmdObj["sort"].embeddedObjectUserCheck() );
+ if ( cmdObj["sort"].type() == Object ){
+ sort = cmdObj["sort"].embeddedObjectUserCheck();
+ }
if ( cmdObj["limit"].isNumber() )
limit = cmdObj["limit"].numberLong();
@@ -222,7 +222,7 @@ namespace mongo {
// query options
BSONObj filter;
- Query q;
+ BSONObj sort;
long long limit;
// functions
@@ -444,7 +444,7 @@ namespace mongo {
readlock lock( mr.ns );
Client::Context ctx( mr.ns );
- shared_ptr<Cursor> temp = bestGuessCursor( mr.ns.c_str(), mr.filter, BSONObj() );
+ shared_ptr<Cursor> temp = bestGuessCursor( mr.ns.c_str(), mr.filter, mr.sort );
auto_ptr<ClientCursor> cursor( new ClientCursor( QueryOption_NoCursorTimeout , temp , mr.ns.c_str() ) );
Timer mt;
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 787e070..cf7cb22 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -728,8 +728,11 @@ namespace mongo {
try {
assert( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
- catch( DBException& ) {
- uasserted(12503,"drop: dropIndexes for collection failed - consider trying repair");
+ catch( DBException& e ) {
+ stringstream ss;
+ ss << "drop: dropIndexes for collection failed - consider trying repair ";
+ ss << " cause: " << e.what();
+ uasserted(12503,ss.str());
}
assert( d->nIndexes == 0 );
}
@@ -1892,7 +1895,10 @@ namespace mongo {
bb.done();
if( nNotClosed )
result.append("nNotClosed", nNotClosed);
-
+ else {
+ ClientCursor::assertNoCursors();
+ }
+
return true;
}
diff --git a/db/query.cpp b/db/query.cpp
index 3d251a0..bfe845c 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -654,11 +654,12 @@ namespace mongo {
if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
_c.reset();
_cc.reset();
+ _so.reset();
massert( 13338, "cursor dropped during query", false );
// TODO maybe we want to prevent recording the winning plan as well?
}
}
- }
+ }
virtual void next() {
if ( _findingStartCursor.get() ) {
@@ -772,7 +773,8 @@ namespace mongo {
_n = _inMemSort ? _so->size() : _n;
}
else if ( _inMemSort ) {
- _so->fill( _buf, _pq.getFields() , _n );
+ if( _so.get() )
+ _so->fill( _buf, _pq.getFields() , _n );
}
if ( _pq.hasOption( QueryOption_CursorTailable ) && _pq.getNumToReturn() != 1 )
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index 3e12e42..a6737be 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -123,6 +123,7 @@ namespace mongo {
void ReplSetImpl::_fillIsMaster(BSONObjBuilder& b) {
const StateBox::SP sp = box.get();
bool isp = sp.state.primary();
+ b.append("setName", name());
b.append("ismaster", isp);
b.append("secondary", sp.state.secondary());
{
diff --git a/db/replpair.h b/db/replpair.h
index 19b79bd..1da8b78 100644
--- a/db/replpair.h
+++ b/db/replpair.h
@@ -164,7 +164,7 @@ namespace mongo {
if( replSet ) {
/* todo: speed up the secondary case. as written here there are 2 mutex entries, it can be 1. */
if( isMaster() ) return;
- notMasterUnless( pq.hasOption(QueryOption_SlaveOk) && theReplSet->isSecondary() );
+ notMasterUnless( pq.hasOption(QueryOption_SlaveOk) && theReplSet && theReplSet->isSecondary() );
} else {
notMasterUnless(isMaster() || pq.hasOption(QueryOption_SlaveOk) || replSettings.slave == SimpleSlave );
}
diff --git a/db/restapi.cpp b/db/restapi.cpp
index 3fd39c2..e9a7ae2 100644
--- a/db/restapi.cpp
+++ b/db/restapi.cpp
@@ -266,8 +266,8 @@ namespace mongo {
ss << "# databases: " << dbHolder.size() << '\n';
- if( ClientCursor::byLocSize()>500 )
- ss << "Cursors byLoc.size(): " << ClientCursor::byLocSize() << '\n';
+ if( ClientCursor::numCursors()>500 )
+ ss << "# Cursors: " << ClientCursor::numCursors() << '\n';
ss << "\nreplication: ";
if( *replInfo )
diff --git a/db/scanandorder.h b/db/scanandorder.h
index 48f5aa6..8d63b9a 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -88,16 +88,17 @@ namespace mongo {
void _add(BSONObj& k, BSONObj o, DiskLoc* loc) {
if (!loc){
- best.insert(make_pair(k,o));
+ best.insert(make_pair(k.getOwned(),o.getOwned()));
} else {
BSONObjBuilder b;
b.appendElements(o);
b.append("$diskLoc", loc->toBSONObj());
- best.insert(make_pair(k, b.obj()));
+ best.insert(make_pair(k.getOwned(), b.obj().getOwned()));
}
}
void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i, DiskLoc* loc) {
+ /* todo : we don't correct approxSize here. */
const BSONObj& worstBestKey = i->first;
int c = worstBestKey.woCompare(k, order.pattern);
if ( c > 0 ) {
@@ -124,7 +125,11 @@ namespace mongo {
BSONObj k = order.getKeyFromObject(o);
if ( (int) best.size() < limit ) {
approxSize += k.objsize();
- uassert( 10128 , "too much key data for sort() with no index. add an index or specify a smaller limit", approxSize < 1 * 1024 * 1024 );
+ approxSize += o.objsize();
+
+ /* note : adjust when bson return limit adjusts. note this limit should be a bit higher. */
+ uassert( 10128 , "too much data for sort() with no index. add an index or specify a smaller limit", approxSize < 32 * 1024 * 1024 );
+
_add(k, o, loc);
return;
}
diff --git a/db/update.cpp b/db/update.cpp
index cbf93ba..d4a038b 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -527,7 +527,7 @@ namespace mongo {
string field = root + e.fieldName();
FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
- DEBUGUPDATE( "\t\t\t field:" << field << "\t mod:" << m->second.m->fieldName << "\t cmp:" << cmp );
+ DEBUGUPDATE( "\t\t\t field:" << field << "\t mod:" << m->second.m->fieldName << "\t cmp:" << cmp << "\t short: " << e.fieldName() );
switch ( cmp ){
@@ -550,6 +550,13 @@ namespace mongo {
e = es.next();
m++;
}
+ else {
+ // this is a very weird case
+ // have seen it in production, but can't reproduce
+ // this assert prevents an inf. loop
+ // but likely isn't the correct solution
+ assert(0);
+ }
continue;
}
case LEFT_BEFORE: // Mod on a field that doesn't exist
diff --git a/debian/changelog b/debian/changelog
index 529c796..a0777fd 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,19 +1,33 @@
-mongodb (1.6.0) unstable; urgency=low
+mongodb (1.6.2) unstable; urgency=low
+
+ * replica_sets some fixes
+ * sharding some fixes with rs
+ * full change log http://jira.mongodb.org/browse/SERVER/fixforversion/10187
+
+ -- Richard Kreuter <richard@10gen.com> Wed, 1 Sep 2010 16:56:28 -0500
+
+
+mongodb (1.6.1) unstable; urgency=low
* replica_sets some fixes
* sharding some fixes with rs
* full change log http://jira.mongodb.org/browse/SERVER/fixforversion/10183
+ -- Richard Kreuter <richard@10gen.com> Tue, 17 Aug 2010 16:56:28 -0500
+
mongodb (1.6.0) unstable; urgency=low
* sharding stable
* replica_sets stable
+ -- Richard Kreuter <richard@10gen.com> Thu, 05 Aug 2010 16:56:28 -0500
+
mongodb (1.5.8) unstable; urgency=low
* sharding lots of changes
* replica_sets lots of changes
+ -- Richard Kreuter <richard@10gen.com> Tue, 03 Aug 2010 16:56:28 -0500
mongodb (1.5.7) unstable; urgency=low
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..ca6f9b7
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..c2d5362
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,37 @@
+#!/bin/sh
+# preinst script for mongodb
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/doxygenConfig b/doxygenConfig
index a5e3f0c..889bd4f 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.6.1
+PROJECT_NUMBER = 1.6.2
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/basic3.js b/jstests/basic3.js
index b1ebafd..2deee2b 100644
--- a/jstests/basic3.js
+++ b/jstests/basic3.js
@@ -1,5 +1,5 @@
-t = db.getCollection( "foo" );
+t = db.getCollection( "foo_basic3" );
t.find( { "a.b" : 1 } ).toArray();
diff --git a/jstests/basic9.js b/jstests/basic9.js
index 5920418..3395a41 100644
--- a/jstests/basic9.js
+++ b/jstests/basic9.js
@@ -1,5 +1,5 @@
-t = db.getCollection( "foo" );
+t = db.getCollection( "foo_basic9" );
t.save( { "foo$bar" : 5 } );
diff --git a/jstests/dbcase.js b/jstests/dbcase.js
index bf0c8e6..d76b739 100644
--- a/jstests/dbcase.js
+++ b/jstests/dbcase.js
@@ -1,6 +1,6 @@
-a = db.getSisterDB( "test_dbnamea" )
-b = db.getSisterDB( "test_dbnameA" )
+a = db.getSisterDB( "dbcasetest_dbnamea" )
+b = db.getSisterDB( "dbcasetest_dbnameA" )
a.dropDatabase();
b.dropDatabase();
diff --git a/jstests/delx.js b/jstests/delx.js
new file mode 100644
index 0000000..3f8c88c
--- /dev/null
+++ b/jstests/delx.js
@@ -0,0 +1,31 @@
+
+a = db.getSisterDB("delxa" )
+b = db.getSisterDB("delxb" )
+
+function setup( mydb ){
+ mydb.dropDatabase();
+ for ( i=0; i<100; i++ ){
+ mydb.foo.insert( { _id : i } );
+ }
+ mydb.getLastError();
+}
+
+setup( a );
+setup( b );
+
+assert.eq( 100 , a.foo.find().itcount() , "A1" )
+assert.eq( 100 , b.foo.find().itcount() , "A2" )
+
+x = a.foo.find().sort( { _id : 1 } ).batchSize( 60 )
+y = b.foo.find().sort( { _id : 1 } ).batchSize( 60 )
+
+x.next();
+y.next();
+
+a.foo.remove( { _id : { $gt : 50 } } );
+
+assert.eq( 51 , a.foo.find().itcount() , "B1" )
+assert.eq( 100 , b.foo.find().itcount() , "B2" )
+
+assert.eq( 59 , x.itcount() , "C1" )
+assert.eq( 99 , y.itcount() , "C2" ); // this was asserting because ClientCursor byLoc doesn't take db into consideration
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
new file mode 100644
index 0000000..92a54da
--- /dev/null
+++ b/jstests/sharding/addshard2.js
@@ -0,0 +1,52 @@
+
+// don't start any shards, yet
+s = new ShardingTest( "add_shard2", 1, 0, 1, {useHostname : true} );
+
+var conn1 = startMongodTest( 30001 , "add_shard21" , 0 , {useHostname : true} );
+var conn2 = startMongodTest( 30002 , "add_shard22" , 0 , {useHostname : true} );
+
+var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 , startPort : 31200 } );
+rs1.startSet();
+rs1.initiate();
+var master1 = rs1.getMaster();
+
+var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 , startPort : 31203 } );
+rs2.startSet();
+rs2.initiate();
+var master2 = rs2.getMaster();
+
+// step 1. name given
+assert(s.admin.runCommand({"addshard" : getHostName()+":30001", "name" : "bar"}).ok, "failed to add shard in step 1");
+var shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000"]}});
+assert(shard, "shard wasn't found");
+assert.eq("bar", shard._id, "shard has incorrect name");
+
+// step 2. replica set
+assert(s.admin.runCommand({"addshard" : "add_shard2_rs1/"+getHostName()+":31200"}).ok, "failed to add shard in step 2");
+shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar"]}});
+assert(shard, "shard wasn't found");
+assert.eq("add_shard2_rs1", shard._id, "t2 name");
+
+// step 3. replica set w/ name given
+assert(s.admin.runCommand({"addshard" : "add_shard2_rs2/"+getHostName()+":31203", "name" : "myshard"}).ok,
+ "failed to add shard in step 3");
+shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1"]}});
+assert(shard, "shard wasn't found");
+assert.eq("myshard", shard._id, "t3 name");
+
+// step 4. no name given
+assert(s.admin.runCommand({"addshard" : getHostName()+":30002"}).ok, "failed to add shard in step 4");
+shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1", "myshard"]}});
+assert(shard, "shard wasn't found");
+assert.eq("shard0001", shard._id, "t4 name");
+
+assert.eq(s.getDB("config").shards.count(), 5, "unexpected number of shards");
+
+// step 5. replica set w/ a wrong host
+assert(!s.admin.runCommand({"addshard" : "add_shard2_rs2/NonExistingHost:31203"}).ok, "accepted bad hostname in step 5");
+
+// step 6. replica set w/ mixed wrong/right hosts
+assert(!s.admin.runCommand({"addshard" : "add_shard2_rs2/"+getHostName()+":31203,foo:9999"}).ok,
+ "accepted bad hostname in step 6");
+
+s.stop(); \ No newline at end of file
diff --git a/jstests/capped4.js b/jstests/slowNightly/capped4.js
index 14d5bd0..01af8f2 100644
--- a/jstests/capped4.js
+++ b/jstests/slowNightly/capped4.js
@@ -25,4 +25,10 @@ assert( t.findOne( { i : i - 1 } ), "E" );
t.remove( { i : i - 1 } );
assert( db.getLastError().indexOf( "capped" ) >= 0, "F" );
-assert( t.validate().valid, "G" );
+assert( t.validate().valid, "G" );
+
+/* there is a cursor open here, so this is a convenient place for a quick cursor test. */
+
+db._adminCommand("closeAllDatabases");
+
+//assert( db.serverStatus().cursors.totalOpen == 0, "cursors open and shouldn't be");
diff --git a/jstests/cursor8.js b/jstests/slowNightly/cursor8.js
index b50fe3b..b50fe3b 100644
--- a/jstests/cursor8.js
+++ b/jstests/slowNightly/cursor8.js
diff --git a/jstests/recstore.js b/jstests/slowNightly/recstore.js
index f2e78e2..f2e78e2 100644
--- a/jstests/recstore.js
+++ b/jstests/slowNightly/recstore.js
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 7044c0d..8224de4 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.6.1
+Version: 1.6.2
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 685d020..551b8a9 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -616,29 +616,44 @@ namespace mongo {
help << "add a new shard to the system";
}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- HostAndPort shardAddr( cmdObj.firstElement().valuestrsafe() );
- if ( shardAddr.isLocalHost() != grid.allowLocalHost() ){
- errmsg = "can't use localhost as a shard since all shards need to communicate. "
- "either use all shards and configdbs in localhost or all in actual IPs " ;
- log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl;
+ errmsg.clear();
+
+ // get replica set component hosts
+ ConnectionString servers = ConnectionString::parse( cmdObj.firstElement().valuestrsafe() , errmsg );
+ if ( ! errmsg.empty() ){
+ log() << "addshard request " << cmdObj << " failed:" << errmsg << endl;
return false;
}
- if ( ! shardAddr.hasPort() ){
- shardAddr.setPort( CmdLine::ShardServerPort );
+            // using localhost in server names implies every other process must use localhost addresses too
+ vector<HostAndPort> serverAddrs = servers.getServers();
+ for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ){
+ if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ){
+ errmsg = "can't use localhost as a shard since all shards need to communicate. "
+ "either use all shards and configdbs in localhost or all in actual IPs " ;
+ log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl;
+ return false;
+ }
+
+ // it's fine if mongods of a set all use default port
+ if ( ! serverAddrs[i].hasPort() ){
+ serverAddrs[i].setPort( CmdLine::ShardServerPort );
+ }
}
+ // name is optional; addShard will provide one if needed
string name = "";
if ( cmdObj["name"].type() == String ) {
name = cmdObj["name"].valuestrsafe();
}
+ // maxSize is the space usage cap in a shard in MBs
long long maxSize = 0;
if ( cmdObj[ ShardFields::maxSize.name() ].isNumber() ){
maxSize = cmdObj[ ShardFields::maxSize.name() ].numberLong();
}
- if ( ! grid.addShard( &name , shardAddr.toString() , maxSize , errmsg ) ){
+ if ( ! grid.addShard( &name , servers , maxSize , errmsg ) ){
log() << "addshard request " << cmdObj << " failed: " << errmsg << endl;
return false;
}
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index 3dbc8ad..91563d2 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -745,6 +745,7 @@ namespace mongo {
fn == "reduce" ||
fn == "query" ||
fn == "sort" ||
+ fn == "scope" ||
fn == "verbose" ){
b.append( e );
}
diff --git a/s/grid.cpp b/s/grid.cpp
index d206f14..e4991b2 100644
--- a/s/grid.cpp
+++ b/s/grid.cpp
@@ -115,41 +115,107 @@ namespace mongo {
_allowLocalShard = allow;
}
- bool Grid::addShard( string* name , const string& host , long long maxSize , string& errMsg ){
- // name is optional
+ bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ){
+        // name can be NULL, so provide a dummy one here to avoid testing it elsewhere
string nameInternal;
if ( ! name ) {
name = &nameInternal;
}
- // Check whether the host exists and is operative. In order to be accepted as a new shard, that
- // mongod must not have any database name that exists already in any other shards. If that
- // test passes, the new shard's databases are going to be entered as non-sharded db's whose
- // primary is the newly added shard.
+ // Check whether the host (or set) exists and run several sanity checks on this request.
+        // There are two sets of sanity checks: making sure adding this particular shard is consistent
+        // with the replica set state (if it exists) and making sure this shard's databases can be
+ // brought into the grid without conflict.
vector<string> dbNames;
try {
- ScopedDbConnection newShardConn( host );
+ ScopedDbConnection newShardConn( servers );
newShardConn->getLastError();
if ( newShardConn->type() == ConnectionString::SYNC ){
newShardConn.done();
- errMsg = "can't use sync cluster as a shard. for replica set, have to use <name>/<server1>,<server2>,...";
+ errMsg = "can't use sync cluster as a shard. for replica set, have to use <setname>/<server1>,<server2>,...";
return false;
}
- // get the shard's local db's listing
- BSONObj res;
- bool ok = newShardConn->runCommand( "admin" , BSON( "listDatabases" << 1 ) , res );
+ BSONObj resIsMaster;
+ bool ok = newShardConn->runCommand( "admin" , BSON( "isMaster" << 1 ) , resIsMaster );
if ( !ok ){
ostringstream ss;
- ss << "failed listing " << host << " databases:" << res;
+ ss << "failed running isMaster: " << resIsMaster;
errMsg = ss.str();
newShardConn.done();
return false;
}
- BSONObjIterator i( res["databases"].Obj() );
+ // if the shard has only one host, make sure it is not part of a replica set
+ string setName = resIsMaster["setName"].str();
+ string commandSetName = servers.getSetName();
+ if ( commandSetName.empty() && ! setName.empty() ){
+ ostringstream ss;
+ ss << "host is part of set: " << setName << " use replica set url format <setname>/<server1>,<server2>,....";
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // if the shard is part of replica set, make sure it is the right one
+ if ( ! commandSetName.empty() && ( commandSetName != setName ) ){
+ ostringstream ss;
+ ss << "host is part of a different set: " << setName;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // if the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are part of
+ // the set. It is fine if not all members of the set are present in 'servers'.
+ bool foundAll = true;
+ string offendingHost;
+ if ( ! commandSetName.empty() ){
+ set<string> hostSet;
+ BSONObjIterator iter( resIsMaster["hosts"].Obj() );
+ while ( iter.more() ){
+ hostSet.insert( iter.next().String() ); // host:port
+ }
+
+ vector<HostAndPort> hosts = servers.getServers();
+ for ( size_t i = 0 ; i < hosts.size() ; i++ ){
+ string host = hosts[i].toString(); // host:port
+ if ( hostSet.find( host ) == hostSet.end() ){
+ offendingHost = host;
+ foundAll = false;
+ break;
+ }
+ }
+ }
+ if ( ! foundAll ){
+ ostringstream ss;
+ ss << "host " << offendingHost << " does not belong to replica set " << setName;;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // shard name defaults to the name of the replica set
+ if ( name->empty() && ! setName.empty() )
+ *name = setName;
+
+ // In order to be accepted as a new shard, that mongod must not have any database name that exists already
+ // in any other shards. If that test passes, the new shard's databases are going to be entered as
+ // non-sharded db's whose primary is the newly added shard.
+
+ BSONObj resListDB;
+ ok = newShardConn->runCommand( "admin" , BSON( "listDatabases" << 1 ) , resListDB );
+ if ( !ok ){
+ ostringstream ss;
+ ss << "failed listing " << servers.toString() << "'s databases:" << resListDB;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ BSONObjIterator i( resListDB["databases"].Obj() );
while ( i.more() ){
BSONObj dbEntry = i.next().Obj();
const string& dbName = dbEntry["name"].String();
@@ -176,7 +242,7 @@ namespace mongo {
DBConfigPtr config = getDBConfig( *it , false );
if ( config.get() != NULL ){
ostringstream ss;
- ss << "trying to add shard " << host << " because local database " << *it;
+ ss << "trying to add shard " << servers.toString() << " because local database " << *it;
ss << " exists in another " << config->getPrimary().toString();
errMsg = ss.str();
return false;
@@ -192,7 +258,7 @@ namespace mongo {
// build the ConfigDB shard document
BSONObjBuilder b;
b.append( "_id" , *name );
- b.append( "host" , host );
+ b.append( "host" , servers.toString() );
if ( maxSize > 0 ){
b.append( ShardFields::maxSize.name() , maxSize );
}
@@ -201,8 +267,8 @@ namespace mongo {
{
ScopedDbConnection conn( configServer.getPrimary() );
- // check whether this host:port is not an already a known shard
- BSONObj old = conn->findOne( ShardNS::shard , BSON( "host" << host ) );
+            // check whether the set of hosts (or single host) is not already a known shard
+ BSONObj old = conn->findOne( ShardNS::shard , BSON( "host" << servers.toString() ) );
if ( ! old.isEmpty() ){
errMsg = "host already used";
conn.done();
@@ -228,7 +294,7 @@ namespace mongo {
for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ){
DBConfigPtr config = getDBConfig( *it , true , *name );
if ( ! config ){
- log() << "adding shard " << host << " even though could not add database " << *it << endl;
+ log() << "adding shard " << servers << " even though could not add database " << *it << endl;
}
}
diff --git a/s/grid.h b/s/grid.h
index c0e18ce..4f3c2ac 100644
--- a/s/grid.h
+++ b/s/grid.h
@@ -57,18 +57,17 @@ namespace mongo {
/**
*
* addShard will create a new shard in the grid. It expects a mongod process to be runing
- * on the provided address.
- * TODO - add the mongod's databases to the grid
+ * on the provided address. Adding a shard that is a replica set is supported.
*
* @param name is an optional string with the name of the shard. if ommited, grid will
* generate one and update the parameter.
- * @param host is the complete address of the machine where the shard will be
+ * @param servers is the connection string of the shard being added
* @param maxSize is the optional space quota in bytes. Zeros means there's no limitation to
* space usage
* @param errMsg is the error description in case the operation failed.
* @return true if shard was successfully added.
*/
- bool addShard( string* name , const string& host , long long maxSize , string& errMsg );
+ bool addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg );
/**
* @return true if the config database knows about a host 'name'
diff --git a/s/server.cpp b/s/server.cpp
index 11f688c..c3dc24c 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -63,10 +63,7 @@ namespace mongo {
class ShardingConnectionHook : public DBConnectionHook {
public:
- virtual void onCreate( DBClientBase * conn ){
- if ( conn->type() != ConnectionString::SYNC )
- conn->simpleCommand( "admin" , 0 , "switchtoclienterrors" );
- }
+
virtual void onHandedOut( DBClientBase * conn ){
ClientInfo::get()->addShard( conn->getServerAddress() );
}
diff --git a/s/shard.cpp b/s/shard.cpp
index 8ef21a0..4d73a66 100644
--- a/s/shard.cpp
+++ b/s/shard.cpp
@@ -76,6 +76,12 @@ namespace mongo {
Shard s( name , host , maxSize , isDraining );
_lookup[name] = s;
_lookup[host] = s;
+
+ // add rs name to lookup (if it exists)
+ size_t pos;
+ if ((pos = host.find('/', 0)) != string::npos) {
+ _lookup[host.substr(0, pos)] = s;
+ }
}
}
@@ -90,6 +96,13 @@ namespace mongo {
{
scoped_lock lk( _mutex );
map<string,Shard>::iterator i = _lookup.find( ident );
+
+ // if normal find didn't find anything, try to find by rs name
+ size_t pos;
+ if ( i == _lookup.end() && (pos = ident.find('/', 0)) != string::npos) {
+ i = _lookup.find( ident.substr(0, pos) );
+ }
+
if ( i != _lookup.end() )
return i->second;
}
diff --git a/scripting/sm_db.cpp b/scripting/sm_db.cpp
index 855a50d..940d785 100644
--- a/scripting/sm_db.cpp
+++ b/scripting/sm_db.cpp
@@ -156,7 +156,7 @@ namespace mongo {
JSBool mongo_external_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
Convertor c( cx );
- uassert( 10238 , "0 or 1 args to Mongo" , argc <= 1 );
+ smuassert( cx , "0 or 1 args to Mongo" , argc <= 1 );
string host = "127.0.0.1";
if ( argc > 0 )
@@ -207,9 +207,9 @@ namespace mongo {
};
JSBool mongo_find(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- uassert( 10240 , "mongo_find neesd 6 args" , argc == 6 );
+ smuassert( cx , "mongo_find needs 6 args" , argc == 6 );
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
- uassert( 10241 , "no connection!" , connHolder && connHolder->get() );
+ smuassert( cx , "no connection!" , connHolder && connHolder->get() );
DBClientWithCommands *conn = connHolder->get();
Convertor c( cx );
diff --git a/shell/servers.js b/shell/servers.js
index 23e52c6..7b306d7 100644
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -62,6 +62,13 @@ startMongodTest = function (port, dirname, restart, extraOptions ) {
f = startMongodNoReset;
if (!dirname)
dirname = "" + port; // e.g., data/db/27000
+
+ var useHostname = false;
+ if (extraOptions) {
+ useHostname = extraOptions.useHostname;
+ delete extraOptions.useHostname;
+ }
+
var options =
{
@@ -78,7 +85,7 @@ startMongodTest = function (port, dirname, restart, extraOptions ) {
var conn = f.apply(null, [ options ] );
- conn.name = "localhost:" + port;
+ conn.name = (useHostname ? getHostName() : "localhost") + ":" + port;
return conn;
}
@@ -145,6 +152,10 @@ myPort = function() {
return 27017;
}
+/**
+ * otherParams can be:
+ * * useHostname to use the hostname (instead of localhost)
+ */
ShardingTest = function( testName , numShards , verboseLevel , numMongos , otherParams ){
this._testName = testName;
@@ -155,7 +166,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
if ( otherParams.sync && numShards < 3 )
throw "if you want sync, you need at least 3 servers";
- var localhost = "localhost";
+ var localhost = otherParams.useHostname ? getHostName() : "localhost";
this._alldbpaths = []
@@ -191,18 +202,18 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
}
else {
for ( var i=0; i<numShards; i++){
- var conn = startMongodTest( 30000 + i , testName + i );
+ var conn = startMongodTest( 30000 + i , testName + i, 0, {useHostname : otherParams.useHostname} );
this._alldbpaths.push( testName +i )
this._connections.push( conn );
}
if ( otherParams.sync ){
- this._configDB = "localhost:30000,localhost:30001,localhost:30002";
+ this._configDB = localhost+":30000,"+localhost+":30001,"+localhost+":30002";
this._configConnection = new Mongo( this._configDB );
this._configConnection.getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || 50 } );
}
else {
- this._configDB = "localhost:30000";
+ this._configDB = localhost + ":30000";
this._connections[0].getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || 50 } );
}
}
@@ -211,6 +222,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
var startMongosPort = 31000;
for ( var i=0; i<(numMongos||1); i++ ){
var myPort = startMongosPort - i;
+ print("config: "+this._configDB);
var conn = startMongos( { port : startMongosPort - i , v : verboseLevel || 0 , configdb : this._configDB } );
conn.name = localhost + ":" + myPort;
this._mongos.push( conn );
diff --git a/util/version.cpp b/util/version.cpp
index aacebcd..bba3dd1 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -14,7 +14,7 @@ namespace mongo {
// mongo processes version support
//
- const char versionString[] = "1.6.1";
+ const char versionString[] = "1.6.2";
string mongodVersion() {
stringstream ss;