author     Antonin Kral <a.kral@bobek.cz>    2012-03-01 13:43:25 +0100
committer  Antonin Kral <a.kral@bobek.cz>    2012-03-01 13:43:25 +0100
commit     ba59b00736b5b8dc0f0bd46397575aaf0cd4d44f (patch)
tree       6364cf3f69677758a13f7aa5f6f06a8ebb652d98 /db
parent     f0d9a01bccdaeb466c12c92057914bbfef59526c (diff)
download   mongodb-ba59b00736b5b8dc0f0bd46397575aaf0cd4d44f.tar.gz
Imported Upstream version 2.0.3
Diffstat (limited to 'db')
-rw-r--r--  db/cloner.cpp                32
-rw-r--r--  db/cloner.h                   2
-rw-r--r--  db/dbcommands.cpp            10
-rw-r--r--  db/dbcommands_generic.cpp     2
-rw-r--r--  db/instance.cpp               2
-rw-r--r--  db/oplog.cpp                 38
-rw-r--r--  db/ops/query.cpp             15
-rw-r--r--  db/queryoptimizer.cpp         2
-rw-r--r--  db/queryoptimizercursor.cpp  80
-rw-r--r--  db/querypattern.h             3
-rw-r--r--  db/queryutil.cpp             11
-rw-r--r--  db/repl/connections.h        16
-rw-r--r--  db/repl/rs_config.cpp        23
-rw-r--r--  db/repl/rs_config.h          11
-rw-r--r--  db/repl/rs_rollback.cpp      18
-rw-r--r--  db/repl/rs_sync.cpp           2
-rw-r--r--  db/repl_block.cpp             2
-rw-r--r--  db/security.cpp               7
-rw-r--r--  db/security_common.h          2
19 files changed, 185 insertions, 93 deletions
diff --git a/db/cloner.cpp b/db/cloner.cpp
index f13ea52..26c2f74 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -68,7 +68,7 @@ namespace mongo {
/** copy the entire database */
bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode = 0);
- bool copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool mayYield, bool mayBeInterrupted, bool copyIndexes = true, bool logForRepl = true );
+ bool copyCollection( const string& ns , const BSONObj& query , string& errmsg , bool mayYield, bool mayBeInterrupted, bool copyIndexes = true, bool logForRepl = true );
};
/* for index info object:
@@ -244,18 +244,19 @@ namespace mongo {
}
}
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logForRepl, bool mayYield, bool mayBeInterrupted) {
+ bool copyCollectionFromRemote(const string& host, const string& ns, string& errmsg) {
Cloner c;
- return c.copyCollection(host, ns, query, errmsg, mayYield, mayBeInterrupted, /*copyIndexes*/ true, logForRepl);
- }
- bool Cloner::copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool mayYield, bool mayBeInterrupted, bool copyIndexes, bool logForRepl ) {
- auto_ptr<DBClientConnection> myconn;
- myconn.reset( new DBClientConnection() );
- if ( ! myconn->connect( from , errmsg ) )
- return false;
+ DBClientConnection *conn = new DBClientConnection();
+ // cloner owns conn in auto_ptr
+ c.setConnection(conn);
+ uassert(15908, errmsg, conn->connect(host, errmsg) && replAuthenticate(conn));
+
+ return c.copyCollection(ns, BSONObj(), errmsg, true, false, /*copyIndexes*/ true, false);
+ }
- conn.reset( myconn.release() );
+ bool Cloner::copyCollection( const string& ns, const BSONObj& query, string& errmsg,
+ bool mayYield, bool mayBeInterrupted, bool copyIndexes, bool logForRepl ) {
writelock lk(ns); // TODO: make this lower down
Client::Context ctx(ns);
@@ -265,7 +266,7 @@ namespace mongo {
string temp = ctx.db()->name + ".system.namespaces";
BSONObj config = conn->findOne( temp , BSON( "name" << ns ) );
if ( config["options"].isABSONObj() )
- if ( ! userCreateNS( ns.c_str() , config["options"].Obj() , errmsg, true , 0 ) )
+ if ( ! userCreateNS( ns.c_str() , config["options"].Obj() , errmsg, logForRepl , 0 ) )
return false;
}
@@ -521,7 +522,14 @@ namespace mongo {
<< " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
Cloner c;
- return c.copyCollection( fromhost , collection , query, errmsg , true, false, copyIndexes );
+ auto_ptr<DBClientConnection> myconn;
+ myconn.reset( new DBClientConnection() );
+ if ( ! myconn->connect( fromhost , errmsg ) )
+ return false;
+
+ c.setConnection( myconn.release() );
+
+ return c.copyCollection( collection , query, errmsg , true, false, copyIndexes );
}
} cmdclonecollection;
diff --git a/db/cloner.h b/db/cloner.h
index 94264f8..130fea0 100644
--- a/db/cloner.h
+++ b/db/cloner.h
@@ -34,6 +34,6 @@ namespace mongo {
bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield,
bool mayBeInterrupted, int *errCode = 0);
- bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logForRepl, bool mayYield, bool mayBeInterrupted);
+ bool copyCollectionFromRemote(const string& host, const string& ns, string& errmsg);
} // namespace mongo
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index b2e6218..fc6327c 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -514,7 +514,15 @@ namespace mongo {
if( overhead > 4000 ) {
t.append("note", "virtual minus mapped is large. could indicate a memory leak");
- log() << "warning: virtual size (" << v << "MB) - mapped size (" << m << "MB) is large (" << overhead << "MB). could indicate a memory leak" << endl;
+
+ static time_t last = 0;
+ time_t now = time(0);
+
+ if ( last + 60 < now ) {
+ last = now;
+ log() << "warning: virtual size (" << v << "MB) - mapped size (" << m << "MB) is large (" << overhead << "MB). could indicate a memory leak" << endl;
+ }
+
}
t.done();
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index 22cee22..c623574 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -334,7 +334,7 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return NONE; }
- virtual bool requiresAuth() { return false; }
+ virtual bool requiresAuth() { return true; }
virtual bool adminOnly() const { return true; }
virtual void help( stringstream& help ) const {
diff --git a/db/instance.cpp b/db/instance.cpp
index 764571d..1d5d589 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -605,6 +605,8 @@ namespace mongo {
break;
js = d.nextJsObj(); // TODO: refactor to do objcheck outside of writelock
}
+
+ globalOpCounters.incInsertInWriteLock(n);
}
void receivedInsert(Message& m, CurOp& op) {
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 5c1671c..6e62607 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -627,12 +627,22 @@ namespace mongo {
bool shouldRetry(const BSONObj& o, const string& hn) {
OplogReader missingObjReader;
+ const char *ns = o.getStringField("ns");
+
+ // capped collections
+ NamespaceDetails *nsd = nsdetails(ns);
+ if (nsd && nsd->capped) {
+ log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
+ return false;
+ }
+
+ // should already have write lock
+ Client::Context ctx(ns);
// we don't have the object yet, which is possible on initial sync. get it.
log() << "replication info adding missing object" << endl; // rare enough we can log
uassert(15916, str::stream() << "Can no longer connect to initial sync source: " << hn, missingObjReader.connect(hn));
- const char *ns = o.getStringField("ns");
// might be more than just _id in the update criteria
BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
BSONObj missingObj;
@@ -651,7 +661,6 @@ namespace mongo {
return false;
}
else {
- Client::Context ctx(ns);
DiskLoc d = theDataFileMgr.insert(ns, (void*) missingObj.objdata(), missingObj.objsize());
uassert(15917, "Got bad disk location when attempting to insert", !d.isNull());
@@ -678,6 +687,7 @@ namespace mongo {
o = fields[0].embeddedObject();
const char *ns = fields[1].valuestrsafe();
+ NamespaceDetails *nsd = nsdetails(ns);
// operation type -- see logOp() comments for types
const char *opType = fields[2].valuestrsafe();
@@ -705,7 +715,7 @@ namespace mongo {
}
else {
/* erh 10/16/2009 - this is probably not relevant any more since its auto-created, but not worth removing */
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow
+ RARELY if (nsd && !nsd->capped) { ensureHaveIdIndex(ns); } // otherwise updates will be slow
/* todo : it may be better to do an insert here, and then catch the dup key exception and do update
then. very few upserts will not be inserts...
@@ -722,9 +732,9 @@ namespace mongo {
// - if not, updates would be slow
// - but if were by id would be slow on primary too so maybe ok
// - if on primary was by another key and there are other indexes, this could be very bad w/out an index
- // - if do create, odd to have on secondary but not primary. also can cause secondary to block for
- // quite a while on creation.
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
+ // - if do create, odd to have on secondary but not primary. also can cause secondary to block for
+ // quite a while on creation.
+ RARELY if (nsd && !nsd->capped) { ensureHaveIdIndex(ns); } // otherwise updates will be super slow
OpDebug debug;
BSONObj updateCriteria = op.getObjectField("o2");
bool upsert = fields[3].booleanSafe();
@@ -741,11 +751,17 @@ namespace mongo {
// of the form
// { _id:..., { x : {$size:...} }
// thus this is not ideal.
- else if( nsdetails(ns) == NULL || Helpers::findById(nsdetails(ns), updateCriteria).isNull() ) {
- failedUpdate = true;
- }
- else {
- // it's present; zero objects were updated because of additional specifiers in the query for idempotence
+ else {
+
+ if (nsd == NULL ||
+ (nsd->findIdIndex() >= 0 && Helpers::findById(nsd, updateCriteria).isNull()) ||
+ // capped collections won't have an _id index
+ (nsd->findIdIndex() < 0 && Helpers::findOne(ns, updateCriteria, false).isNull())) {
+ failedUpdate = true;
+ }
+
+ // Otherwise, it's present; zero objects were updated because of additional specifiers
+ // in the query for idempotence
}
}
else {
diff --git a/db/ops/query.cpp b/db/ops/query.cpp
index cf4dc98..36f2536 100644
--- a/db/ops/query.cpp
+++ b/db/ops/query.cpp
@@ -221,7 +221,8 @@ namespace mongo {
_skip( spec["skip"].numberLong() ),
_limit( spec["limit"].numberLong() ),
_nscanned(),
- _bc() {
+ _bc(),
+ _yieldRecoveryFailed() {
}
virtual void _init() {
@@ -251,6 +252,7 @@ namespace mongo {
virtual void recoverFromYield() {
if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _yieldRecoveryFailed = true;
_c.reset();
_cc.reset();
@@ -309,7 +311,7 @@ namespace mongo {
}
long long count() const { return _count; }
virtual bool mayRecordPlan() const {
- return ( _myCount > _limit / 2 ) || ( complete() && !stopRequested() );
+ return !_yieldRecoveryFailed && ( ( _myCount > _limit / 2 ) || ( complete() && !stopRequested() ) );
}
private:
@@ -343,6 +345,7 @@ namespace mongo {
ClientCursor::CleanupPointer _cc;
ClientCursor::YieldData _yieldData;
+ bool _yieldRecoveryFailed;
};
/* { count: "collectionname"[, query: <query>] }
@@ -474,7 +477,8 @@ namespace mongo {
_oplogReplay( pq.hasOption( QueryOption_OplogReplay) ),
_response( response ),
_eb( eb ),
- _curop( curop )
+ _curop( curop ),
+ _yieldRecoveryFailed()
{}
virtual void _init() {
@@ -531,6 +535,7 @@ namespace mongo {
_findingStartCursor->recoverFromYield();
}
else if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _yieldRecoveryFailed = true;
_c.reset();
_cc.reset();
_so.reset();
@@ -723,7 +728,7 @@ namespace mongo {
}
virtual bool mayRecordPlan() const {
- return ( _pq.getNumToReturn() != 1 ) && ( ( _n > _pq.getNumToReturn() / 2 ) || ( complete() && !stopRequested() ) );
+ return !_yieldRecoveryFailed && ( _pq.getNumToReturn() != 1 ) && ( ( _n > _pq.getNumToReturn() / 2 ) || ( complete() && !stopRequested() ) );
}
virtual QueryOp *_createChild() const {
@@ -791,6 +796,8 @@ namespace mongo {
ExplainBuilder &_eb;
CurOp &_curop;
OpTime _slaveReadTill;
+
+ bool _yieldRecoveryFailed;
};
/* run a query -- includes checking for and running a Command \
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 692e9fd..71ca657 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -482,7 +482,7 @@ doneCheckOrder:
}
massert( 10368 , "Unable to locate previously recorded index", p.get() );
- if ( !( _bestGuessOnly && p->scanAndOrderRequired() ) ) {
+ if ( !p->unhelpful() && !( _bestGuessOnly && p->scanAndOrderRequired() ) ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
_plans.push_back( p );
diff --git a/db/queryoptimizercursor.cpp b/db/queryoptimizercursor.cpp
index 9260889..f8b57f7 100644
--- a/db/queryoptimizercursor.cpp
+++ b/db/queryoptimizercursor.cpp
@@ -35,7 +35,9 @@ namespace mongo {
* @param aggregateNscanned - shared int counting total nscanned for
* query ops for all cursors.
*/
- QueryOptimizerCursorOp( long long &aggregateNscanned ) : _matchCount(), _mustAdvance(), _nscanned(), _aggregateNscanned( aggregateNscanned ) {}
+ QueryOptimizerCursorOp( long long &aggregateNscanned ) :
+ _matchCount(), _mustAdvance(), _nscanned(), _capped(),
+ _aggregateNscanned( aggregateNscanned ), _yieldRecoveryFailed() {}
virtual void _init() {
if ( qp().scanAndOrderRequired() ) {
@@ -64,6 +66,7 @@ namespace mongo {
virtual void recoverFromYield() {
if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _yieldRecoveryFailed = true;
_c.reset();
_cc.reset();
@@ -113,12 +116,15 @@ namespace mongo {
DiskLoc currLoc() const { return _c ? _c->currLoc() : DiskLoc(); }
BSONObj currKey() const { return _c ? _c->currKey() : BSONObj(); }
virtual bool mayRecordPlan() const {
- return complete() && !stopRequested();
+ return !_yieldRecoveryFailed && complete() && !stopRequested();
}
shared_ptr<Cursor> cursor() const { return _c; }
private:
void mayAdvance() {
- if ( _mustAdvance && _c ) {
+ if ( !_c ) {
+ return;
+ }
+ if ( _mustAdvance ) {
_c->advance();
_mustAdvance = false;
}
@@ -134,6 +140,7 @@ namespace mongo {
DiskLoc _posBeforeYield;
ClientCursor::YieldData _yieldData;
long long &_aggregateNscanned;
+ bool _yieldRecoveryFailed;
};
/**
@@ -181,36 +188,7 @@ namespace mongo {
return DiskLoc();
}
virtual bool advance() {
- if ( _takeover ) {
- return _takeover->advance();
- }
-
- // Ok to advance if currOp in an error state due to failed yield recovery.
- // This may be the case when advance() is called by recoverFromYield().
- if ( !( _currOp && _currOp->error() ) && !ok() ) {
- return false;
- }
-
- _currOp = 0;
- shared_ptr<QueryOp> op = _mps->nextOp();
- rethrowOnError( op );
-
- QueryOptimizerCursorOp *qocop = dynamic_cast<QueryOptimizerCursorOp*>( op.get() );
- if ( !op->complete() ) {
- // 'qocop' will be valid until we call _mps->nextOp() again.
- _currOp = qocop;
- }
- else if ( op->stopRequested() ) {
- if ( qocop->cursor() ) {
- _takeover.reset( new MultiCursor( _mps,
- qocop->cursor(),
- op->matcher( qocop->cursor() ),
- *op,
- _nscanned - qocop->cursor()->nscanned() ) );
- }
- }
-
- return ok();
+ return _advance( false );
}
virtual BSONObj currKey() const {
if ( _takeover ) {
@@ -252,9 +230,9 @@ namespace mongo {
}
if ( _currOp ) {
_mps->recoverFromYield();
- if ( _currOp->error() ) {
- // See if we can advance to a non error op.
- advance();
+ if ( _currOp->error() || !ok() ) {
+ // Advance to a non error op or a following $or clause if possible.
+ _advance( true );
}
}
}
@@ -304,6 +282,36 @@ namespace mongo {
}
private:
+ bool _advance( bool force ) {
+ if ( _takeover ) {
+ return _takeover->advance();
+ }
+
+ if ( !force && !ok() ) {
+ return false;
+ }
+
+ _currOp = 0;
+ shared_ptr<QueryOp> op = _mps->nextOp();
+ rethrowOnError( op );
+
+ QueryOptimizerCursorOp *qocop = dynamic_cast<QueryOptimizerCursorOp*>( op.get() );
+ if ( !op->complete() ) {
+ // 'qocop' will be valid until we call _mps->nextOp() again.
+ _currOp = qocop;
+ }
+ else if ( op->stopRequested() ) {
+ if ( qocop->cursor() ) {
+ _takeover.reset( new MultiCursor( _mps,
+ qocop->cursor(),
+ op->matcher( qocop->cursor() ),
+ *op,
+ _nscanned - qocop->cursor()->nscanned() ) );
+ }
+ }
+
+ return ok();
+ }
void rethrowOnError( const shared_ptr< QueryOp > &op ) {
// If all plans have erred out, assert.
if ( op->error() ) {
diff --git a/db/querypattern.h b/db/querypattern.h
index d87cc64..2f7450e 100644
--- a/db/querypattern.h
+++ b/db/querypattern.h
@@ -36,7 +36,8 @@ namespace mongo {
Equality,
LowerBound,
UpperBound,
- UpperAndLowerBound
+ UpperAndLowerBound,
+ ConstraintPresent
};
bool operator<( const QueryPattern &other ) const;
/** for testing only */
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index 717eac8..47f89ad 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -1007,6 +1007,8 @@ namespace mongo {
qp._fieldTypes[ i->first ] = QueryPattern::UpperBound;
else if ( lower )
qp._fieldTypes[ i->first ] = QueryPattern::LowerBound;
+ else
+ qp._fieldTypes[ i->first ] = QueryPattern::ConstraintPresent;
}
}
qp.setSort( sort );
@@ -1019,13 +1021,13 @@ namespace mongo {
BoundBuilders builders;
builders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
BSONObjIterator i( keyPattern );
- bool ineq = false; // until ineq is true, we are just dealing with equality and $in bounds
+ bool equalityOnly = true; // until equalityOnly is false, we are just dealing with equality (no range or $in queries).
while( i.more() ) {
BSONElement e = i.next();
const FieldRange &fr = range( e.fieldName() );
int number = (int) e.number(); // returns 0.0 if not numeric
bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction >= 0 ? 1 : -1 ) > 0 );
- if ( !ineq ) {
+ if ( equalityOnly ) {
if ( fr.equality() ) {
for( BoundBuilders::const_iterator j = builders.begin(); j != builders.end(); ++j ) {
j->first->appendAs( fr.min(), "" );
@@ -1033,9 +1035,8 @@ namespace mongo {
}
}
else {
- if ( !fr.inQuery() ) {
- ineq = true;
- }
+ equalityOnly = false;
+
BoundBuilders newBuilders;
const vector<FieldInterval> &intervals = fr.intervals();
for( BoundBuilders::const_iterator i = builders.begin(); i != builders.end(); ++i ) {
diff --git a/db/repl/connections.h b/db/repl/connections.h
index 61c581b..3ada71c 100644
--- a/db/repl/connections.h
+++ b/db/repl/connections.h
@@ -72,7 +72,8 @@ namespace mongo {
struct X {
mongo::mutex z;
DBClientConnection cc;
- X() : z("X"), cc(/*reconnect*/ true, 0, /*timeout*/ 10.0) {
+ bool connected;
+ X() : z("X"), cc(/*reconnect*/ true, 0, /*timeout*/ 10.0), connected(false) {
cc._logLevel = 2;
}
} *x;
@@ -88,6 +89,7 @@ namespace mongo {
log() << "couldn't connect to " << _hostport << ": " << err << rsLog;
return false;
}
+ x->connected = true;
// if we cannot authenticate against a member, then either its key file
// or our key file has to change. if our key file has to change, we'll
@@ -113,11 +115,19 @@ namespace mongo {
connLock.reset( new scoped_lock(x->z) );
}
}
- if( !first ) {
- connLock.reset( new scoped_lock(x->z) );
+
+ // already locked connLock above
+ if (first) {
+ connect();
+ return;
+ }
+
+ connLock.reset( new scoped_lock(x->z) );
+ if (x->connected) {
return;
}
+ // Keep trying to connect if we're not yet connected
connect();
}
diff --git a/db/repl/rs_config.cpp b/db/repl/rs_config.cpp
index 13352b1..c451d46 100644
--- a/db/repl/rs_config.cpp
+++ b/db/repl/rs_config.cpp
@@ -296,6 +296,26 @@ namespace mongo {
_ok = false;
}
+ void ReplSetConfig::setMajority() {
+ int total = members.size();
+ int nonArbiters = total;
+ int strictMajority = total/2+1;
+
+ for (vector<MemberCfg>::iterator it = members.begin(); it < members.end(); it++) {
+ if ((*it).arbiterOnly) {
+ nonArbiters--;
+ }
+ }
+
+ // majority should be all "normal" members if we have something like 4
+ // arbiters & 3 normal members
+ _majority = (strictMajority > nonArbiters) ? nonArbiters : strictMajority;
+ }
+
+ int ReplSetConfig::getMajority() const {
+ return _majority;
+ }
+
void ReplSetConfig::checkRsConfig() const {
uassert(13132,
"nonmatching repl set name in _id field; check --replSet command line",
@@ -533,6 +553,9 @@ namespace mongo {
try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); }
catch(...) { }
}
+
+ // figure out the majority for this config
+ setMajority();
}
static inline void configAssert(bool expr) {
diff --git a/db/repl/rs_config.h b/db/repl/rs_config.h
index b22b61e..da6552a 100644
--- a/db/repl/rs_config.h
+++ b/db/repl/rs_config.h
@@ -135,9 +135,20 @@ namespace mongo {
BSONObj asBson() const;
+ /**
+ * Getter and setter for _majority. This is almost always
+ * members.size()/2+1, but can be the number of non-arbiter members if
+ * there are more arbiters than non-arbiters (writing to 3 out of 7
+ * servers is safe if 4 of the servers are arbiters).
+ */
+ void setMajority();
+ int getMajority() const;
+
bool _constructed;
private:
bool _ok;
+ int _majority;
+
void from(BSONObj);
void clear();
diff --git a/db/repl/rs_rollback.cpp b/db/repl/rs_rollback.cpp
index f012e65..97a910e 100644
--- a/db/repl/rs_rollback.cpp
+++ b/db/repl/rs_rollback.cpp
@@ -388,24 +388,18 @@ namespace mongo {
for( set<string>::iterator i = h.collectionsToResync.begin(); i != h.collectionsToResync.end(); i++ ) {
string ns = *i;
sethbmsg(str::stream() << "rollback 4.1 coll resync " << ns);
- Client::Context c(*i);
- try {
+
+ Client::Context c(ns);
+ {
bob res;
string errmsg;
dropCollection(ns, errmsg, res);
{
dbtemprelease r;
- bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, bo(), errmsg, false, true, false);
- if( !ok ) {
- log() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg << rsLog;
- throw "rollback error resyncing rollection [1]";
- }
+ bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, errmsg);
+ uassert(15909, str::stream() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg, ok);
}
}
- catch(...) {
- log() << "replset rollback error resyncing collection " << ns << rsLog;
- throw "rollback error resyncing rollection [2]";
- }
}
/* we did more reading from primary, so check it again for a rollback (which would mess us up), and
@@ -423,7 +417,7 @@ namespace mongo {
setMinValid(newMinValid);
}
}
- catch(...) {
+ catch (DBException& e) {
err = "can't get/set minvalid";
}
if( h.rbid != getRBID(r.conn()) ) {
diff --git a/db/repl/rs_sync.cpp b/db/repl/rs_sync.cpp
index 8cd3e14..c86dbbb 100644
--- a/db/repl/rs_sync.cpp
+++ b/db/repl/rs_sync.cpp
@@ -161,7 +161,7 @@ namespace mongo {
}
catch (DBException& e) {
// skip duplicate key exceptions
- if( e.getCode() == 11000 || e.getCode() == 11001 ) {
+ if( e.getCode() == 11000 || e.getCode() == 11001 || e.getCode() == 12582) {
continue;
}
diff --git a/db/repl_block.cpp b/db/repl_block.cpp
index dcac121..840bbb2 100644
--- a/db/repl_block.cpp
+++ b/db/repl_block.cpp
@@ -175,7 +175,7 @@ namespace mongo {
if (wStr == "majority") {
// use the entire set, including arbiters, to prevent writing
// to a majority of the set but not a majority of voters
- return replicatedToNum(op, theReplSet->config().members.size()/2+1);
+ return replicatedToNum(op, theReplSet->config().getMajority());
}
map<string,ReplSetConfig::TagRule*>::const_iterator it = theReplSet->config().rules.find(wStr);
diff --git a/db/security.cpp b/db/security.cpp
index ae14770..05165cb 100644
--- a/db/security.cpp
+++ b/db/security.cpp
@@ -83,6 +83,9 @@ namespace mongo {
string systemUsers = dbname + ".system.users";
// OCCASIONALLY Helpers::ensureIndex(systemUsers.c_str(), userPattern, false, "user_1");
{
+ mongolock lk(false);
+ Client::Context c(systemUsers, dbpath, &lk, false);
+
BSONObjBuilder b;
b << "user" << user;
BSONObj query = b.done();
@@ -101,10 +104,10 @@ namespace mongo {
AuthenticationInfo *ai = cc().getAuthenticationInfo();
if ( readOnly ) {
- ai->authorizeReadOnly( cc().database()->name.c_str() , user );
+ ai->authorizeReadOnly( dbname.c_str() , user );
}
else {
- ai->authorize( cc().database()->name.c_str() , user );
+ ai->authorize( dbname.c_str() , user );
}
}
diff --git a/db/security_common.h b/db/security_common.h
index 2f2565f..c9a3e3a 100644
--- a/db/security_common.h
+++ b/db/security_common.h
@@ -57,7 +57,7 @@ namespace mongo {
virtual bool slaveOk() const {
return true;
}
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return NONE; }
virtual void help(stringstream& ss) const { ss << "internal"; }
CmdAuthenticate() : Command("authenticate") {}
bool run(const string& dbname , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);