Diffstat (limited to 'db')
-rw-r--r--   db/btree.cpp         |  33
-rw-r--r--   db/client.cpp        |  17
-rw-r--r--   db/client.h          |   2
-rw-r--r--   db/clientcursor.cpp  |   1
-rw-r--r--   db/cmdline.cpp       |   1
-rw-r--r--   db/curop.h           |  10
-rw-r--r--   db/db.cpp            |   2
-rw-r--r--   db/db.h              |   5
-rw-r--r--   db/dbcommands.cpp    |  33
-rw-r--r--   db/dbwebserver.cpp   |   5
-rw-r--r--   db/index_geo2d.cpp   |  35
-rw-r--r--   db/lasterror.cpp     |  10
-rw-r--r--   db/pdfile.cpp        |  16
-rw-r--r--   db/repl.cpp          |   9
-rw-r--r--   db/repl.h            |   5
-rw-r--r--   db/update.cpp        |  19
-rw-r--r--   db/update.h          |  12
17 files changed, 154 insertions, 61 deletions
diff --git a/db/btree.cpp b/db/btree.cpp
index 18f9e76..0c8ca28 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -665,13 +665,18 @@ found:
         if ( split_debug )
             out() << " " << thisLoc.toString() << ".split" << endl;
 
-        int mid = n / 2;
+        int split = n / 2;
+        if ( keypos == n ) { // see SERVER-983
+            split = 0.9 * n;
+            if ( split > n - 2 )
+                split = n - 2;
+        }
 
         DiskLoc rLoc = addBucket(idx);
         BtreeBucket *r = rLoc.btreemod();
         if ( split_debug )
-            out() << " mid:" << mid << ' ' << keyNode(mid).key.toString() << " n:" << n << endl;
-        for ( int i = mid+1; i < n; i++ ) {
+            out() << " split:" << split << ' ' << keyNode(split).key.toString() << " n:" << n << endl;
+        for ( int i = split+1; i < n; i++ ) {
             KeyNode kn = keyNode(i);
             r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
         }
@@ -684,18 +689,18 @@ found:
         rLoc.btree()->fixParentPtrs(rLoc);
 
         {
-            KeyNode middle = keyNode(mid);
-            nextChild = middle.prevChildBucket; // middle key gets promoted, its children will be thisLoc (l) and rLoc (r)
+            KeyNode splitkey = keyNode(split);
+            nextChild = splitkey.prevChildBucket; // splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
             if ( split_debug ) {
-                out() << " middle key:" << middle.key.toString() << endl;
+                out() << " splitkey key:" << splitkey.key.toString() << endl;
             }
 
-            // promote middle to a parent node
+            // promote splitkey to a parent node
             if ( parent.isNull() ) {
                 // make a new parent if we were the root
                 DiskLoc L = addBucket(idx);
                 BtreeBucket *p = L.btreemod();
-                p->pushBack(middle.recordLoc, middle.key, order, thisLoc);
+                p->pushBack(splitkey.recordLoc, splitkey.key, order, thisLoc);
                 p->nextChild = rLoc;
                 p->assertValid( order );
                 parent = idx.head = L;
@@ -708,22 +713,22 @@ found:
                 */
                 rLoc.btreemod()->parent = parent;
                 if ( split_debug )
-                    out() << " promoting middle key " << middle.key.toString() << endl;
-                parent.btree()->_insert(parent, middle.recordLoc, middle.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
+                    out() << " promoting splitkey key " << splitkey.key.toString() << endl;
+                parent.btree()->_insert(parent, splitkey.recordLoc, splitkey.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
             }
         }
 
-        truncateTo(mid, order);  // note this may trash middle.key. thus we had to promote it before finishing up here.
+        truncateTo(split, order);  // note this may trash splitkey.key. thus we had to promote it before finishing up here.
 
         // add our new key, there is room now
         {
-            if ( keypos <= mid ) {
+            if ( keypos <= split ) {
                 if ( split_debug )
-                    out() << " keypos<mid, insertHere() the new key" << endl;
+                    out() << " keypos<split, insertHere() the new key" << endl;
                 insertHere(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
             } else {
-                int kp = keypos-mid-1;
+                int kp = keypos-split-1;
                 assert(kp>=0);
                 rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
             }
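The btree.cpp hunks above replace the fixed midpoint bucket split with a right-biased split whenever the new key would land in the rightmost position (keypos == n, see SERVER-983), so buckets stay roughly 90% full under monotonically increasing keys instead of half empty. A minimal standalone sketch of that split-point choice (not code from this commit; chooseSplitPos and the n >= 3 assumption are illustrative):

    #include <cassert>
    #include <iostream>

    // Pick the index at which to split a full b-tree bucket holding n keys.
    // keypos is where the incoming key would go; keypos == n means the right edge.
    // Assumes n >= 3, which real buckets easily satisfy.
    int chooseSplitPos(int n, int keypos) {
        int split = n / 2;                        // default: split in the middle
        if (keypos == n) {                        // ascending inserts always hit the right edge
            split = static_cast<int>(0.9 * n);    // keep ~90% of the keys in the left bucket
            if (split > n - 2)
                split = n - 2;                    // leave at least one key for the right bucket
        }
        assert(split > 0 && split < n);
        return split;
    }

    int main() {
        std::cout << chooseSplitPos(100, 50) << "\n";   // mid-bucket insert -> 50
        std::cout << chooseSplitPos(100, 100) << "\n";  // rightmost insert  -> 90
    }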
diff --git a/db/client.cpp b/db/client.cpp
index dc82a25..a2fe568 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -245,4 +245,21 @@ namespace mongo {
         return b.obj();
     }
 
+    int Client::recommendedYieldMicros(){
+        int num = 0;
+        {
+            scoped_lock bl(clientsMutex);
+            num = clients.size();
+        }
+
+        if ( --num <= 0 ) // -- is for myself
+            return 0;
+
+        if ( num > 50 )
+            num = 50;
+
+        num *= 100;
+        return num;
+    }
+
 }
diff --git a/db/client.h b/db/client.h
index ab43509..c484198 100644
--- a/db/client.h
+++ b/db/client.h
@@ -45,6 +45,8 @@ namespace mongo {
         static mongo::mutex clientsMutex;
         static set<Client*> clients; // always be in clientsMutex when manipulating this
 
+        static int recommendedYieldMicros();
+
         class GodScope {
             bool _prev;
         public:
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index be0bd2f..1281fc3 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -232,6 +232,7 @@ namespace mongo {
 
         {
             dbtempreleasecond unlock;
+            sleepmicros( Client::recommendedYieldMicros() );
         }
 
         if ( ClientCursor::find( id , false ) == 0 ){
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index 59eafdd..2d15279 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -23,7 +23,6 @@ namespace po = boost::program_options;
 
 namespace mongo {
 
-    CmdLine cmdLine;
     void setupSignals();
 
     BSONArray argvArray;
diff --git a/db/curop.h b/db/curop.h
--- a/db/curop.h
+++ b/db/curop.h
@@ -108,15 +108,19 @@ namespace mongo {
             Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
             _checkpoint = now;
         }
-
-        void reset( const sockaddr_in & remote, int op ) {
+
+        void reset(){
             _reset();
             _start = _checkpoint = 0;
             _active = true;
             _opNum = _nextOpNum++;
             _ns[0] = '?'; // just in case not set later
             _debug.reset();
-            resetQuery();
+            resetQuery();
+        }
+
+        void reset( const sockaddr_in & remote, int op ) {
+            reset();
             _remote = remote;
             _op = op;
         }
diff --git a/db/db.cpp b/db/db.cpp
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -45,6 +45,8 @@ namespace mongo {
 
+    CmdLine cmdLine;
+
     bool useJNI = true;
 
     /* only off if --nocursors which is for debugging. */
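The client.cpp, client.h and clientcursor.cpp hunks above make the cursor yield path sleep for a period scaled by how many other clients are active, capped at 50 clients times 100 microseconds. A standalone sketch of the same scaling rule, with clientCount standing in for the size of the clients set (illustrative only, not the committed function):

    #include <algorithm>
    #include <iostream>

    // Suggest how long (in microseconds) an operation should sleep when it yields,
    // based on how many other clients are currently connected.
    int recommendedYieldMicros(int clientCount) {
        int others = clientCount - 1;        // exclude the yielding client itself
        if (others <= 0)
            return 0;                        // nobody else is waiting: no need to sleep
        others = std::min(others, 50);       // cap the sleep at 50 * 100us = 5ms
        return others * 100;                 // 100 microseconds per other client
    }

    int main() {
        std::cout << recommendedYieldMicros(1) << "\n";    // 0
        std::cout << recommendedYieldMicros(10) << "\n";   // 900
        std::cout << recommendedYieldMicros(500) << "\n";  // 5000
    }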
diff --git a/db/db.h b/db/db.h
--- a/db/db.h
+++ b/db/db.h
@@ -141,8 +141,11 @@ namespace mongo {
 
         string _todb( const string& ns ) const {
             size_t i = ns.find( '.' );
-            if ( i == string::npos )
+            if ( i == string::npos ){
+                uassert( 13074 , "db name can't be empty" , ns.size() );
                 return ns;
+            }
+            uassert( 13075 , "db name can't be empty" , i > 0 );
             return ns.substr( 0 , i );
         }
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 6d1aa5a..46c2e4d 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1020,19 +1020,26 @@ namespace mongo {
     namespace {
         long long getIndexSizeForCollection(string db, string ns, BSONObjBuilder* details=NULL, int scale = 1 ){
-            DBDirectClient client;
-            auto_ptr<DBClientCursor> indexes =
-                client.query(db + ".system.indexes", QUERY( "ns" << ns));
-
-            long long totalSize = 0;
-            while (indexes->more()){
-                BSONObj index = indexes->nextSafe();
-                NamespaceDetails * nsd = nsdetails( (ns + ".$" + index["name"].valuestrsafe()).c_str() );
-                if (!nsd)
-                    continue; // nothing to do here
-                totalSize += nsd->datasize;
-                if (details)
-                    details->appendNumber(index["name"].valuestrsafe(), nsd->datasize / scale );
+            dbMutex.assertAtLeastReadLocked();
+
+            NamespaceDetails * nsd = nsdetails( ns.c_str() );
+            if ( ! nsd )
+                return 0;
+
+            long long totalSize = 0;
+
+            NamespaceDetails::IndexIterator ii = nsd->ii();
+            while ( ii.more() ){
+                IndexDetails& d = ii.next();
+                string collNS = d.indexNamespace();
+                NamespaceDetails * mine = nsdetails( collNS.c_str() );
+                if ( ! mine ){
+                    log() << "error: have index [" << collNS << "] but no NamespaceDetails" << endl;
+                    continue;
+                }
+                totalSize += mine->datasize;
+                if ( details )
+                    details->appendNumber( d.indexName() , mine->datasize / scale );
             }
             return totalSize;
         }
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 75d3a92..c55c8a6 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -241,6 +241,8 @@ namespace mongo {
         if ( from.localhost() )
             return true;
 
+        Client::GodScope gs;
+
         if ( db.findOne( "admin.system.users" , BSONObj() , 0 , QueryOption_SlaveOk ).isEmpty() )
             return true;
 
@@ -315,6 +317,7 @@ namespace mongo {
                 responseMsg = "not allowed\n";
                 return;
             }
+            headers.push_back( "Content-Type: application/json" );
             generateServerStatus( url , responseMsg );
             responseCode = 200;
             return;
@@ -519,7 +522,7 @@ namespace mongo {
         BSONObj query = queryBuilder.obj();
 
         auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
-
+        uassert( 13085 , "query failed for dbwebserver" , cursor.get() );
         if ( one ) {
             if ( cursor->more() ) {
                 BSONObj obj = cursor->next();
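getIndexSizeForCollection in dbcommands.cpp now walks the collection's own index metadata under the read lock instead of querying db.system.indexes through DBDirectClient. A simplified standalone sketch of that accumulation pattern; IndexMeta and CollectionMeta are stand-ins, not the real IndexDetails/NamespaceDetails types:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct IndexMeta { std::string name; std::int64_t dataSize; };   // stand-in for IndexDetails
    struct CollectionMeta { std::vector<IndexMeta> indexes; };       // stand-in for NamespaceDetails

    // Sum index sizes by iterating the collection's own metadata, optionally
    // filling a per-index breakdown scaled by `scale` (e.g. 1024 for KB).
    std::int64_t indexSizeForCollection(const CollectionMeta& coll,
                                        std::map<std::string, std::int64_t>* details = nullptr,
                                        int scale = 1) {
        std::int64_t total = 0;
        for (const IndexMeta& idx : coll.indexes) {
            total += idx.dataSize;
            if (details)
                (*details)[idx.name] = idx.dataSize / scale;
        }
        return total;
    }

    int main() {
        CollectionMeta coll{{{"_id_", 4096}, {"name_1", 8192}}};
        std::map<std::string, std::int64_t> breakdown;
        std::cout << indexSizeForCollection(coll, &breakdown, 1024) << "\n";  // 12288
        for (const auto& kv : breakdown)
            std::cout << kv.first << " -> " << kv.second << "\n";             // per-index size in KB
    }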
diff --git a/db/index_geo2d.cpp b/db/index_geo2d.cpp
index 4730c29..5ebf65a 100644
--- a/db/index_geo2d.cpp
+++ b/db/index_geo2d.cpp
@@ -893,14 +893,14 @@ namespace mongo {
     public:
         typedef multiset<GeoPoint> Holder;
 
-        GeoHopper( const Geo2dType * g , unsigned max , const GeoHash& n , const BSONObj& filter = BSONObj() )
-            : GeoAccumulator( g , filter ) , _max( max ) , _near( n ) {
+        GeoHopper( const Geo2dType * g , unsigned max , const GeoHash& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() )
+            : GeoAccumulator( g , filter ) , _max( max ) , _near( n ), _maxDistance( maxDistance ) {
         }
 
         virtual bool checkDistance( const GeoHash& h , double& d ){
             d = _g->distance( _near , h );
-            bool good = _points.size() < _max || d < farthest();
+            bool good = d < _maxDistance && ( _points.size() < _max || d < farthest() );
             GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near << "\t" << h << "\t" << d << " ok: " << good << " farthest: " << farthest() );
             return good;
@@ -926,6 +926,7 @@ namespace mongo {
         unsigned _max;
         GeoHash _near;
         Holder _points;
+        double _maxDistance;
 
     };
@@ -999,10 +1000,10 @@ namespace mongo {
     class GeoSearch {
     public:
-        GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() )
+        GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() )
             : _spec( g ) , _n( n ) , _start( n ) ,
-              _numWanted( numWanted ) , _filter( filter ) ,
-              _hopper( new GeoHopper( g , numWanted , n , filter ) )
+              _numWanted( numWanted ) , _filter( filter ) , _maxDistance( maxDistance ) ,
+              _hopper( new GeoHopper( g , numWanted , n , filter , maxDistance ) )
         {
             assert( g->getDetails() );
             _nscanned = 0;
@@ -1042,6 +1043,10 @@ namespace mongo {
                     if ( ! _prefix.constrains() )
                         break;
                     _prefix = _prefix.up();
+
+                    double temp = _spec->distance( _prefix , _start );
+                    if ( temp > ( _maxDistance * 2 ) )
+                        break;
                 }
             }
             GEODEBUG( "done part 1" );
@@ -1105,6 +1110,7 @@ namespace mongo {
         GeoHash _prefix;
         int _numWanted;
         BSONObj _filter;
+        double _maxDistance;
 
         shared_ptr<GeoHopper> _hopper;
 
         long long _nscanned;
@@ -1478,7 +1484,16 @@ namespace mongo {
         switch ( e.embeddedObject().firstElement().getGtLtOp() ){
         case BSONObj::opNEAR: {
             e = e.embeddedObject().firstElement();
-            shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query ) );
+            double maxDistance = numeric_limits<double>::max();
+            if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ){
+                BSONObjIterator i(e.embeddedObject());
+                i.next();
+                i.next();
+                BSONElement e = i.next();
+                if ( e.isNumber() )
+                    maxDistance = e.numberDouble();
+            }
+            shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query , maxDistance ) );
             s->exec();
             auto_ptr<Cursor> c;
             c.reset( new GeoSearchCursor( s ) );
@@ -1568,7 +1583,11 @@ namespace mongo {
             if ( cmdObj["query"].type() == Object )
                 filter = cmdObj["query"].embeddedObject();
 
-            GeoSearch gs( g , n , numWanted , filter );
+            double maxDistance = numeric_limits<double>::max();
+            if ( cmdObj["maxDistance"].isNumber() )
+                maxDistance = cmdObj["maxDistance"].number();
+
+            GeoSearch gs( g , n , numWanted , filter , maxDistance );
 
             if ( cmdObj["start"].type() == String){
                 GeoHash start = (string) cmdObj["start"].valuestr();
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 9fefcfa..53042e7 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -72,8 +72,14 @@ namespace mongo {
     LastError * LastErrorHolder::_get( bool create ){
         int id = _id.get();
-        if ( id == 0 )
-            return _tl.get();
+        if ( id == 0 ){
+            LastError * le = _tl.get();
+            if ( ! le && create ){
+                le = new LastError();
+                _tl.reset( le );
+            }
+            return le;
+        }
 
         scoped_lock lock(_idsmutex);
         map<int,Status>::iterator i = _ids.find( id );
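The index_geo2d.cpp hunks thread an optional maxDistance bound through GeoHopper and GeoSearch, so $near and the geoNear command can reject far-away candidates and stop widening the search prefix once it is clearly out of range. A small standalone sketch of the acceptance rule; NearestHopper and its types are illustrative stand-ins, not the real GeoHopper:

    #include <initializer_list>
    #include <iostream>
    #include <iterator>
    #include <limits>
    #include <set>

    struct Candidate {
        double dist;
        bool operator<(const Candidate& o) const { return dist < o.dist; }
    };

    // Keep the k nearest candidates, but never accept anything beyond maxDistance.
    class NearestHopper {
    public:
        NearestHopper(unsigned max, double maxDistance = std::numeric_limits<double>::max())
            : _max(max), _maxDistance(maxDistance) {}

        // Mirrors the checkDistance() predicate above: reject when out of range,
        // otherwise accept while there is room or the candidate beats the current worst.
        bool wouldAccept(double d) const {
            return d < _maxDistance && (_points.size() < _max || d < farthest());
        }

        void add(double d) {
            if (!wouldAccept(d))
                return;
            _points.insert(Candidate{d});
            if (_points.size() > _max)
                _points.erase(std::prev(_points.end()));  // drop the farthest point
        }

        double farthest() const {
            return _points.empty() ? 0.0 : std::prev(_points.end())->dist;
        }

    private:
        unsigned _max;
        double _maxDistance;
        std::multiset<Candidate> _points;
    };

    int main() {
        NearestHopper hopper(2, /*maxDistance=*/5.0);
        for (double d : {1.0, 7.0, 3.0, 0.5})
            hopper.add(d);                       // 7.0 is rejected by the distance bound
        std::cout << hopper.farthest() << "\n";  // 1 (kept 0.5 and 1.0)
    }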
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 1c4608c..e46ffb7 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -45,6 +45,8 @@ _ disallow system* manipulations from the database.
 
 namespace mongo {
 
+    const int MaxExtentSize = 0x7ff00000;
+
     map<string, unsigned> BackgroundOperation::dbsInProg;
     set<string> BackgroundOperation::nsInProg;
 
@@ -357,7 +359,7 @@ namespace mongo {
     Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
         massert( 10357 , "shutdown in progress", !goingAway );
-        massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= 0x7ff00000 );
+        massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= MaxExtentSize );
         massert( 10359 , "header==0 on new extent: 32 bit mmap space exceeded?", header ); // null if file open failed
         int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
         DiskLoc loc;
@@ -919,11 +921,19 @@ namespace mongo {
     }
 
     int followupExtentSize(int len, int lastExtentLen) {
+        assert( len < MaxExtentSize );
         int x = initialExtentSize(len);
         int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
         int sz = y > x ? y : x;
+
+        if ( sz < lastExtentLen )
+            sz = lastExtentLen;
+        else if ( sz > MaxExtentSize )
+            sz = MaxExtentSize;
+
         sz = ((int)sz) & 0xffffff00;
         assert( sz > len );
+
        return sz;
     }
 
@@ -1141,7 +1151,7 @@ namespace mongo {
                 break;
             }
         }
-        progress.done();
+        progress.finished();
         return n;
     }
 
@@ -1192,7 +1202,7 @@ namespace mongo {
     // throws DBException
     static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
-        log() << "building new index on " << idx.keyPattern() << " for " << ns << endl;
+        log() << "building new index on " << idx.keyPattern() << " for " << ns << ( background ? " background" : "" ) << endl;
         Timer t;
         unsigned long long n;
 
diff --git a/db/repl.cpp b/db/repl.cpp
index 62b2986..137c25f 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -102,7 +102,7 @@ namespace mongo {
             return;
         info = _comment;
         if ( n != state && !cmdLine.quiet )
-            log() << "pair: setting master=" << n << " was " << state << '\n';
+            log() << "pair: setting master=" << n << " was " << state << endl;
         state = n;
     }
 
@@ -732,7 +732,7 @@ namespace mongo {
             ( replPair && replSettings.fastsync ) ) {
             DBDirectClient c;
             if ( c.exists( "local.oplog.$main" ) ) {
-                BSONObj op = c.findOne( "local.oplog.$main", Query().sort( BSON( "$natural" << -1 ) ) );
+                BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
                 if ( !op.isEmpty() ) {
                     tmp.syncedTo = op[ "ts" ].date();
                     tmp._lastSavedLocalTs = op[ "ts" ].date();
@@ -938,6 +938,7 @@ namespace mongo {
         }
 
         Client::Context ctx( ns );
+        ctx.getClient()->curop()->reset();
 
         bool empty = ctx.db()->isEmpty();
         bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
@@ -1606,6 +1607,7 @@ namespace mongo {
             ReplInfo r("replMain load sources");
             dblock lk;
             ReplSource::loadAll(sources);
+            replSettings.fastsync = false; // only need this param for initial reset
         }
 
         if ( sources.empty() ) {
@@ -1860,6 +1862,9 @@ namespace mongo {
             createOplog();
             boost::thread t(replMasterThread);
         }
+
+        while( replSettings.fastsync ) // don't allow writes until we've set up from log
+            sleepmillis( 50 );
     }
 
diff --git a/db/repl.h b/db/repl.h
--- a/db/repl.h
+++ b/db/repl.h
@@ -205,7 +205,10 @@ namespace mongo {
     public:
         MemIds() : size_() {}
         friend class IdTracker;
-        void reset() { imp_.clear(); }
+        void reset() {
+            imp_.clear();
+            size_ = 0;
+        }
         bool get( const char *ns, const BSONObj &id ) { return imp_[ ns ].count( id ); }
         void set( const char *ns, const BSONObj &id, bool val ) {
             if ( val ) {
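pdfile.cpp gives the 0x7ff00000 ceiling a name (MaxExtentSize) and clamps followupExtentSize so the next extent never shrinks below the previous one and never exceeds that ceiling before rounding. A standalone sketch of the sizing rule; initialExtentSize here is a hypothetical placeholder, not the real heuristic:

    #include <cassert>
    #include <iostream>

    const int MaxExtentSize = 0x7ff00000;   // ~2GB hard ceiling on a single extent

    // Placeholder for the real initial-extent heuristic (assumed, not from pdfile.cpp).
    int initialExtentSize(int len) {
        return len * 2;
    }

    // Size the next extent: grow geometrically from the last extent, never shrink
    // below it, never exceed MaxExtentSize, then round down to a 256-byte multiple.
    int followupExtentSize(int len, int lastExtentLen) {
        assert(len < MaxExtentSize);
        int x = initialExtentSize(len);
        int y = (int)(lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
        int sz = y > x ? y : x;

        if (sz < lastExtentLen)
            sz = lastExtentLen;             // never allocate a smaller extent than last time
        else if (sz > MaxExtentSize)
            sz = MaxExtentSize;             // clamp to the ceiling

        sz = sz & 0xffffff00;               // round down to a 256-byte boundary
        assert(sz > len);
        return sz;
    }

    int main() {
        std::cout << followupExtentSize(1000, 100000) << "\n";      // 399872: ~4x the last extent
        std::cout << followupExtentSize(1000, 1789000000) << "\n";  // 2146435072: clamped to MaxExtentSize
    }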
diff --git a/db/update.cpp b/db/update.cpp
index d6a5c5e..7049fff 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -24,11 +24,11 @@
 #include "repl.h"
 #include "update.h"
 
+//#define DEBUGUPDATE(x) cout << x << endl;
+#define DEBUGUPDATE(x)
+
 namespace mongo {
 
-    //#define DEBUGUPDATE(x) cout << x << endl;
-#define DEBUGUPDATE(x)
-
     const char* Mod::modNames[] = { "$inc", "$set", "$push", "$pushAll", "$pull", "$pullAll" , "$pop", "$unset" , "$bitand" , "$bitor" , "$bit" , "$addToSet" };
     unsigned Mod::modNamesNum = sizeof(Mod::modNames)/sizeof(char*);
 
@@ -310,11 +310,12 @@ namespace mongo {
         // Perform this check first, so that we don't leave a partially modified object on uassert.
         for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); ++i ) {
+            DEBUGUPDATE( "\t\t prepare : " << i->first );
             ModState& ms = mss->_mods[i->first];
 
             const Mod& m = i->second;
             BSONElement e = obj.getFieldDotted(m.fieldName);
-
+
             ms.m = &m;
             ms.old = e;
 
@@ -406,6 +407,7 @@ namespace mongo {
                 mss->amIInPlacePossible( false );
             }
         }
+
         return auto_ptr<ModSetState>( mss );
     }
 
@@ -424,7 +426,7 @@ namespace mongo {
             // [dm] the BSONElementManipulator statements below are for replication (correct?)
             case Mod::INC:
                 m.m->incrementMe( m.old );
-                m.fixedName = "$set";
+                m.fixedOpName = "$set";
                 m.fixed = &(m.old);
                 break;
             case Mod::SET:
@@ -477,6 +479,7 @@ namespace mongo {
     template< class Builder >
     void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ){
+        DEBUGUPDATE( "\t\t createNewFromMods root: " << root );
         BSONObjIteratorSorted es( obj );
         BSONElement e = es.next();
 
@@ -488,6 +491,8 @@ namespace mongo {
         while ( e.type() && m != mend ){
             string field = root + e.fieldName();
             FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
+
+            DEBUGUPDATE( "\t\t\t" << field << "\t" << m->second.m->fieldName << "\t" << cmp );
 
             switch ( cmp ){
 
@@ -809,11 +814,13 @@ namespace mongo {
             const BSONObj& onDisk = loc.obj();
 
             ModSet * useMods = mods.get();
+            bool forceRewrite = false;
 
             auto_ptr<ModSet> mymodset;
             if ( u->getMatchDetails().elemMatchKey && mods->hasDynamicArray() ){
                 useMods = mods->fixDynamicArray( u->getMatchDetails().elemMatchKey );
                 mymodset.reset( useMods );
+                forceRewrite = true;
             }
 
@@ -850,7 +857,7 @@ namespace mongo {
                 pattern = patternBuilder.obj();
             }
 
-            if ( mss->needOpLogRewrite() ){
+            if ( forceRewrite || mss->needOpLogRewrite() ){
                 DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
                 logOp("u", ns, mss->getOpLogRewrite() , &pattern );
             }
diff --git a/db/update.h b/db/update.h
index e14b0fb..5d20114 100644
--- a/db/update.h
+++ b/db/update.h
@@ -327,7 +327,7 @@ namespace mongo {
         const Mod * m;
         BSONElement old;
 
-        const char * fixedName;
+        const char * fixedOpName;
         BSONElement * fixed;
         int pushStartSize;
 
@@ -337,7 +337,7 @@ namespace mongo {
         long long inclong;
 
         ModState(){
-            fixedName = 0;
+            fixedOpName = 0;
             fixed = 0;
             pushStartSize = -1;
             incType = EOO;
@@ -352,7 +352,7 @@ namespace mongo {
         }
 
         bool needOpLogRewrite() const {
-            if ( fixed || fixedName || incType )
+            if ( fixed || fixedOpName || incType )
                 return true;
 
             switch( op() ){
@@ -374,13 +374,13 @@ namespace mongo {
                 return;
             }
 
-            const char * name = fixedName ? fixedName : Mod::modNames[op()];
+            const char * name = fixedOpName ? fixedOpName : Mod::modNames[op()];
 
             BSONObjBuilder bb( b.subobjStart( name ) );
             if ( fixed )
                 bb.appendAs( *fixed , m->fieldName );
             else
-                bb.append( m->elt );
+                bb.appendAs( m->elt , m->fieldName );
             bb.done();
         }
 
@@ -470,7 +470,7 @@ namespace mongo {
                 break;
 
             case Mod::INC:
-                ms.fixedName = "$set";
+                ms.fixedOpName = "$set";
             case Mod::SET: {
                 m._checkForAppending( m.elt );
                 b.appendAs( m.elt, m.shortFieldName );
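update.cpp and update.h rename fixedName to fixedOpName and force an oplog rewrite whenever the positional ($) operator was expanded, so the oplog carries a concrete $set that a slave can replay verbatim instead of the original $inc or positional form. A much-simplified standalone sketch of that idea, using plain structs and strings in place of BSON (every name here is illustrative):

    #include <iostream>
    #include <string>

    // One modifier from an update, e.g. { $inc: { counter: 5 } }.
    struct Mod {
        std::string op;            // operator as sent by the client, e.g. "$inc"
        std::string field;
        long long arg;
    };

    // What actually happened when the modifier was applied in place.
    struct AppliedMod {
        std::string fixedOpName;   // non-empty when the logged op differs from the client's
        std::string field;
        long long newValue;
    };

    // Apply an $inc in place and decide how it should appear in the oplog:
    // the result is logged as a $set of the final value, so replaying it
    // yields the same document state regardless of what the slave had before.
    AppliedMod applyInc(const Mod& m, long long currentValue) {
        AppliedMod out;
        out.field = m.field;
        out.newValue = currentValue + m.arg;
        out.fixedOpName = "$set";  // rewrite: log "$set: { field: newValue }" instead of the $inc
        return out;
    }

    std::string oplogEntry(const Mod& requested, const AppliedMod& applied) {
        const std::string& op = applied.fixedOpName.empty() ? requested.op : applied.fixedOpName;
        return "{ " + op + ": { " + applied.field + ": " + std::to_string(applied.newValue) + " } }";
    }

    int main() {
        Mod inc{"$inc", "counter", 5};
        AppliedMod applied = applyInc(inc, 37);
        std::cout << oplogEntry(inc, applied) << "\n";  // { $set: { counter: 42 } }
    }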