-rw-r--r--  SConstruct                                   6
-rw-r--r--  buildscripts/hacks_ubuntu.py                 9
-rw-r--r--  client/clientOnly.cpp                        2
-rw-r--r--  client/dbclient.cpp                          2
-rw-r--r--  db/btree.cpp                                33
-rw-r--r--  db/client.cpp                               17
-rw-r--r--  db/client.h                                  2
-rw-r--r--  db/clientcursor.cpp                          1
-rw-r--r--  db/cmdline.cpp                               1
-rw-r--r--  db/curop.h                                  10
-rw-r--r--  db/db.cpp                                    2
-rw-r--r--  db/db.h                                      5
-rw-r--r--  db/dbcommands.cpp                           33
-rw-r--r--  db/dbwebserver.cpp                           5
-rw-r--r--  db/index_geo2d.cpp                          35
-rw-r--r--  db/lasterror.cpp                            10
-rw-r--r--  db/pdfile.cpp                               16
-rw-r--r--  db/repl.cpp                                  9
-rw-r--r--  db/repl.h                                    5
-rw-r--r--  db/update.cpp                               19
-rw-r--r--  db/update.h                                 12
-rw-r--r--  dbtests/basictests.cpp                       5
-rw-r--r--  dbtests/btreetests.cpp                      38
-rw-r--r--  dbtests/framework.cpp                        2
-rw-r--r--  dbtests/repltests.cpp                        3
-rw-r--r--  debian/changelog                            14
-rw-r--r--  debian/files                                 1
-rw-r--r--  debian/mongodb.upstart                      15
-rw-r--r--  debian/preinst                              37
-rw-r--r--  doxygenConfig                                2
-rw-r--r--  jstests/geo2.js                              5
-rw-r--r--  jstests/repl/basic1.js                      34
-rw-r--r--  jstests/repl/snapshot3.js                    2
-rw-r--r--  jstests/update_arraymatch2.js                2
-rw-r--r--  jstests/updated.js                          20
-rw-r--r--  lib/libboost_thread-gcc41-mt-d-1_34_1.a     bin 0 -> 692920 bytes
-rw-r--r--  rpm/mongo.spec                               2
-rw-r--r--  s/server.cpp                                 3
-rw-r--r--  scripting/engine_spidermonkey.h              1
-rw-r--r--  stdafx.cpp                                   2
-rw-r--r--  tools/tool.cpp                               3
-rw-r--r--  util/assert_util.cpp                         1
-rw-r--r--  util/goodies.h                              59
43 files changed, 384 insertions, 101 deletions
diff --git a/SConstruct b/SConstruct
index 8a17e69..8195f77 100644
--- a/SConstruct
+++ b/SConstruct
@@ -359,7 +359,7 @@ if GetOption( "extralib" ) is not None:
# ------ SOURCE FILE SETUP -----------
-commonFiles = Split( "stdafx.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp db/cmdline.cpp shell/mongo.cpp" )
+commonFiles = Split( "stdafx.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp shell/mongo.cpp" )
commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
"util/assert_util.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/debug_util.cpp",
"util/thread_pool.cpp" ]
@@ -384,7 +384,7 @@ else:
coreDbFiles = [ "db/commands.cpp" ]
coreServerFiles = [ "util/message_server_port.cpp" , "util/message_server_asio.cpp" ]
-serverOnlyFiles = Split( "db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/matcher.cpp db/dbeval.cpp db/dbwebserver.cpp db/dbhelpers.cpp db/instance.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/client.cpp db/security.cpp util/miniwebserver.cpp db/storage.cpp db/reccache.cpp db/queryoptimizer.cpp db/extsort.cpp db/mr.cpp s/d_util.cpp" )
+serverOnlyFiles = Split( "db/query.cpp db/update.cpp db/introspect.cpp db/btree.cpp db/clientcursor.cpp db/tests.cpp db/repl.cpp db/btreecursor.cpp db/cloner.cpp db/namespace.cpp db/matcher.cpp db/dbeval.cpp db/dbwebserver.cpp db/dbhelpers.cpp db/instance.cpp db/database.cpp db/pdfile.cpp db/cursor.cpp db/security_commands.cpp db/client.cpp db/security.cpp util/miniwebserver.cpp db/storage.cpp db/reccache.cpp db/queryoptimizer.cpp db/extsort.cpp db/mr.cpp s/d_util.cpp db/cmdline.cpp" )
serverOnlyFiles += [ "db/index.cpp" ] + Glob( "db/index_*.cpp" )
serverOnlyFiles += Glob( "db/dbcommands*.cpp" )
@@ -404,7 +404,7 @@ else:
nojni = True
coreShardFiles = []
-shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/chunk.cpp" , "s/shardkey.cpp" , "s/config.cpp" , "s/s_only.cpp" ]
+shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/chunk.cpp" , "s/shardkey.cpp" , "s/config.cpp" , "s/s_only.cpp" , "db/cmdline.cpp" ]
serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" ]
serverOnlyFiles += [ "db/module.cpp" ] + Glob( "db/modules/*.cpp" )
diff --git a/buildscripts/hacks_ubuntu.py b/buildscripts/hacks_ubuntu.py
index 56649f8..81deddd 100644
--- a/buildscripts/hacks_ubuntu.py
+++ b/buildscripts/hacks_ubuntu.py
@@ -39,9 +39,14 @@ def foundxulrunner( env , options ):
env.Prepend( RPATH=[ libroot ] )
env.Prepend( CPPPATH=[ incroot + "stable/" ,
- incroot + "unstable/" ] )
+ incroot + "unstable/" ,
+ incroot ] )
+ env.Prepend( CPPPATH=[ "/usr/include/nspr/" ] )
env.Append( CPPDEFINES=[ "XULRUNNER" , "OLDJS" ] )
if best.find( "1.9.0" ) >= 0 or best.endswith("1.9"):
- env.Append( CPPDEFINES=[ "XULRUNNER190" ] )
+ if best.endswith( "1.9.1.9" ):
+ pass
+ else:
+ env.Append( CPPDEFINES=[ "XULRUNNER190" ] )
return True
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index e0f59a9..566095a 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -22,6 +22,8 @@
namespace mongo {
+ CmdLine cmdLine;
+
const char * curNs = "in client mode";
bool dbexitCalled = false;
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index d505c9f..7d04866 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -769,6 +769,8 @@ namespace mongo {
}
if ( !connector->call( toSend, *m, false ) )
return false;
+ if ( ! m->data )
+ return false;
dataReceived();
return true;
}
diff --git a/db/btree.cpp b/db/btree.cpp
index 18f9e76..0c8ca28 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -665,13 +665,18 @@ found:
if ( split_debug )
out() << " " << thisLoc.toString() << ".split" << endl;
- int mid = n / 2;
+ int split = n / 2;
+ if ( keypos == n ) { // see SERVER-983
+ split = 0.9 * n;
+ if ( split > n - 2 )
+ split = n - 2;
+ }
DiskLoc rLoc = addBucket(idx);
BtreeBucket *r = rLoc.btreemod();
if ( split_debug )
- out() << " mid:" << mid << ' ' << keyNode(mid).key.toString() << " n:" << n << endl;
- for ( int i = mid+1; i < n; i++ ) {
+ out() << " split:" << split << ' ' << keyNode(split).key.toString() << " n:" << n << endl;
+ for ( int i = split+1; i < n; i++ ) {
KeyNode kn = keyNode(i);
r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
}
@@ -684,18 +689,18 @@ found:
rLoc.btree()->fixParentPtrs(rLoc);
{
- KeyNode middle = keyNode(mid);
- nextChild = middle.prevChildBucket; // middle key gets promoted, its children will be thisLoc (l) and rLoc (r)
+ KeyNode splitkey = keyNode(split);
+ nextChild = splitkey.prevChildBucket; // the split key gets promoted; its children will be thisLoc (l) and rLoc (r)
if ( split_debug ) {
- out() << " middle key:" << middle.key.toString() << endl;
+ out() << " splitkey key:" << splitkey.key.toString() << endl;
}
- // promote middle to a parent node
+ // promote splitkey to a parent node
if ( parent.isNull() ) {
// make a new parent if we were the root
DiskLoc L = addBucket(idx);
BtreeBucket *p = L.btreemod();
- p->pushBack(middle.recordLoc, middle.key, order, thisLoc);
+ p->pushBack(splitkey.recordLoc, splitkey.key, order, thisLoc);
p->nextChild = rLoc;
p->assertValid( order );
parent = idx.head = L;
@@ -708,22 +713,22 @@ found:
*/
rLoc.btreemod()->parent = parent;
if ( split_debug )
- out() << " promoting middle key " << middle.key.toString() << endl;
- parent.btree()->_insert(parent, middle.recordLoc, middle.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
+ out() << " promoting splitkey key " << splitkey.key.toString() << endl;
+ parent.btree()->_insert(parent, splitkey.recordLoc, splitkey.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
}
}
- truncateTo(mid, order); // note this may trash middle.key. thus we had to promote it before finishing up here.
+ truncateTo(split, order); // note this may trash splitkey.key. thus we had to promote it before finishing up here.
// add our new key, there is room now
{
- if ( keypos <= mid ) {
+ if ( keypos <= split ) {
if ( split_debug )
- out() << " keypos<mid, insertHere() the new key" << endl;
+ out() << " keypos<split, insertHere() the new key" << endl;
insertHere(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
} else {
- int kp = keypos-mid-1;
+ int kp = keypos-split-1;
assert(kp>=0);
rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
}
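
The db/btree.cpp change above is the SERVER-983 fix: when the new key would land at the rightmost position of a full bucket (keypos == n), the split point moves from the middle to roughly 90% of the bucket, so ascending-key insert patterns leave densely packed left buckets instead of half-empty ones. A minimal sketch of just that arithmetic, detached from the real BtreeBucket code:

    #include <cassert>

    // Sketch of the split-point heuristic from the hunk above.
    // n      = number of keys currently in the bucket
    // keypos = position where the new key would be inserted
    // Returns the index of the key promoted to the parent bucket.
    int chooseSplitPos( int n , int keypos ) {
        int split = n / 2;            // default: split in the middle
        if ( keypos == n ) {          // inserting at the far right (ascending keys)
            split = (int)( 0.9 * n ); // keep ~90% of the keys in the left bucket
            if ( split > n - 2 )      // but leave at least one key on the right
                split = n - 2;
        }
        return split;
    }

    int main() {
        assert( chooseSplitPos( 10 , 5 )  == 5 );  // middle insert: even split
        assert( chooseSplitPos( 10 , 10 ) == 8 );  // rightmost insert: 90%, clamped to n-2
    }
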
diff --git a/db/client.cpp b/db/client.cpp
index dc82a25..a2fe568 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -245,4 +245,21 @@ namespace mongo {
return b.obj();
}
+ int Client::recommendedYieldMicros(){
+ int num = 0;
+ {
+ scoped_lock bl(clientsMutex);
+ num = clients.size();
+ }
+
+ if ( --num <= 0 ) // -- is for myself
+ return 0;
+
+ if ( num > 50 )
+ num = 50;
+
+ num *= 100;
+ return num;
+ }
+
}
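
The new Client::recommendedYieldMicros() in db/client.cpp scales how long an operation sleeps when it yields by how many other clients are active: 100 microseconds per other client, capped at 50 clients (5 ms), and zero when this is the only client. A self-contained sketch of that arithmetic, taking the client count as a parameter instead of reading the global clients set under clientsMutex:

    #include <cassert>

    // Sketch of Client::recommendedYieldMicros() above; numClients stands
    // in for clients.size() read under clientsMutex in the real code.
    int recommendedYieldMicros( int numClients ) {
        int num = numClients;
        if ( --num <= 0 )   // -- excludes ourselves; nobody else to yield to
            return 0;
        if ( num > 50 )     // cap the yield at 50 * 100 = 5000 microseconds
            num = 50;
        return num * 100;   // 100 microseconds per other active client
    }

    int main() {
        assert( recommendedYieldMicros( 1 )   == 0 );    // only this client
        assert( recommendedYieldMicros( 3 )   == 200 );  // two other clients
        assert( recommendedYieldMicros( 500 ) == 5000 ); // capped
    }
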
diff --git a/db/client.h b/db/client.h
index ab43509..c484198 100644
--- a/db/client.h
+++ b/db/client.h
@@ -45,6 +45,8 @@ namespace mongo {
static mongo::mutex clientsMutex;
static set<Client*> clients; // always be in clientsMutex when manipulating this
+ static int recommendedYieldMicros();
+
class GodScope {
bool _prev;
public:
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index be0bd2f..1281fc3 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -232,6 +232,7 @@ namespace mongo {
{
dbtempreleasecond unlock;
+ sleepmicros( Client::recommendedYieldMicros() );
}
if ( ClientCursor::find( id , false ) == 0 ){
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index 59eafdd..2d15279 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -23,7 +23,6 @@
namespace po = boost::program_options;
namespace mongo {
- CmdLine cmdLine;
void setupSignals();
BSONArray argvArray;
diff --git a/db/curop.h b/db/curop.h
index e5d38d7..21582f2 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -108,15 +108,19 @@ namespace mongo {
Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
_checkpoint = now;
}
-
- void reset( const sockaddr_in & remote, int op ) {
+
+ void reset(){
_reset();
_start = _checkpoint = 0;
_active = true;
_opNum = _nextOpNum++;
_ns[0] = '?'; // just in case not set later
_debug.reset();
- resetQuery();
+ resetQuery();
+ }
+
+ void reset( const sockaddr_in & remote, int op ) {
+ reset();
_remote = remote;
_op = op;
}
diff --git a/db/db.cpp b/db/db.cpp
index fe63df1..9ff49ba 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -45,6 +45,8 @@
namespace mongo {
+ CmdLine cmdLine;
+
bool useJNI = true;
/* only off if --nocursors which is for debugging. */
diff --git a/db/db.h b/db/db.h
index 0bbc97b..78fc98d 100644
--- a/db/db.h
+++ b/db/db.h
@@ -141,8 +141,11 @@ namespace mongo {
string _todb( const string& ns ) const {
size_t i = ns.find( '.' );
- if ( i == string::npos )
+ if ( i == string::npos ){
+ uassert( 13074 , "db name can't be empty" , ns.size() );
return ns;
+ }
+ uassert( 13075 , "db name can't be empty" , i > 0 );
return ns.substr( 0 , i );
}
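
The db/db.h hunk adds uasserts 13074/13075 so a namespace with an empty database portion ("" or ".foo") is rejected when _todb() extracts the db name. A rough standalone equivalent, with a plain std::runtime_error standing in for uassert:

    #include <cassert>
    #include <stdexcept>
    #include <string>

    // Sketch of the validation added to Database::_todb() above.
    std::string todb( const std::string& ns ) {
        size_t i = ns.find( '.' );
        if ( i == std::string::npos ) {
            if ( ns.empty() )
                throw std::runtime_error( "db name can't be empty" ); // uassert 13074
            return ns;                 // whole namespace is the db name
        }
        if ( i == 0 )
            throw std::runtime_error( "db name can't be empty" );     // uassert 13075
        return ns.substr( 0 , i );     // text before the first '.'
    }

    int main() {
        assert( todb( "test.foo" ) == "test" );
        assert( todb( "test" ) == "test" );
        try { todb( ".foo" ); assert( false ); } catch ( std::runtime_error& ) {}
    }
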
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 6d1aa5a..46c2e4d 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1020,19 +1020,26 @@ namespace mongo {
namespace {
long long getIndexSizeForCollection(string db, string ns, BSONObjBuilder* details=NULL, int scale = 1 ){
- DBDirectClient client;
- auto_ptr<DBClientCursor> indexes =
- client.query(db + ".system.indexes", QUERY( "ns" << ns));
-
- long long totalSize = 0;
- while (indexes->more()){
- BSONObj index = indexes->nextSafe();
- NamespaceDetails * nsd = nsdetails( (ns + ".$" + index["name"].valuestrsafe()).c_str() );
- if (!nsd)
- continue; // nothing to do here
- totalSize += nsd->datasize;
- if (details)
- details->appendNumber(index["name"].valuestrsafe(), nsd->datasize / scale );
+ dbMutex.assertAtLeastReadLocked();
+
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ if ( ! nsd )
+ return 0;
+
+ long long totalSize = 0;
+
+ NamespaceDetails::IndexIterator ii = nsd->ii();
+ while ( ii.more() ){
+ IndexDetails& d = ii.next();
+ string collNS = d.indexNamespace();
+ NamespaceDetails * mine = nsdetails( collNS.c_str() );
+ if ( ! mine ){
+ log() << "error: have index [" << collNS << "] but no NamespaceDetails" << endl;
+ continue;
+ }
+ totalSize += mine->datasize;
+ if ( details )
+ details->appendNumber( d.indexName() , mine->datasize / scale );
}
return totalSize;
}
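
The rewritten getIndexSizeForCollection() in db/dbcommands.cpp no longer issues a nested query against system.indexes; it asserts the read lock that is already held and walks the collection's in-memory index metadata, summing each index namespace's datasize. A toy model of that shape, with hypothetical IndexInfo/CollectionInfo structs standing in for IndexDetails/NamespaceDetails:

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the real metadata structures, only to
    // illustrate the control flow of the rewritten function above.
    struct IndexInfo {
        std::string name;      // e.g. "_id_"
        long long   datasize;  // bytes used by the index's extents
    };
    struct CollectionInfo {
        std::vector<IndexInfo> indexes;
    };

    long long indexSizeForCollection( const CollectionInfo& coll , int scale = 1 ) {
        long long total = 0;
        for ( size_t i = 0; i < coll.indexes.size(); i++ ) {
            total += coll.indexes[i].datasize;
            std::cout << coll.indexes[i].name << ": "
                      << coll.indexes[i].datasize / scale << std::endl;
        }
        return total;
    }

    int main() {
        CollectionInfo c;
        IndexInfo idIdx = { "_id_" , 8192 };
        IndexInfo xIdx  = { "x_1"  , 4096 };
        c.indexes.push_back( idIdx );
        c.indexes.push_back( xIdx );
        std::cout << "total: " << indexSizeForCollection( c ) << std::endl;
    }
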
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 75d3a92..c55c8a6 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -241,6 +241,8 @@ namespace mongo {
if ( from.localhost() )
return true;
+ Client::GodScope gs;
+
if ( db.findOne( "admin.system.users" , BSONObj() , 0 , QueryOption_SlaveOk ).isEmpty() )
return true;
@@ -315,6 +317,7 @@ namespace mongo {
responseMsg = "not allowed\n";
return;
}
+ headers.push_back( "Content-Type: application/json" );
generateServerStatus( url , responseMsg );
responseCode = 200;
return;
@@ -519,7 +522,7 @@ namespace mongo {
BSONObj query = queryBuilder.obj();
auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
-
+ uassert( 13085 , "query failed for dbwebserver" , cursor.get() );
if ( one ) {
if ( cursor->more() ) {
BSONObj obj = cursor->next();
diff --git a/db/index_geo2d.cpp b/db/index_geo2d.cpp
index 4730c29..5ebf65a 100644
--- a/db/index_geo2d.cpp
+++ b/db/index_geo2d.cpp
@@ -893,14 +893,14 @@ namespace mongo {
public:
typedef multiset<GeoPoint> Holder;
- GeoHopper( const Geo2dType * g , unsigned max , const GeoHash& n , const BSONObj& filter = BSONObj() )
- : GeoAccumulator( g , filter ) , _max( max ) , _near( n ) {
+ GeoHopper( const Geo2dType * g , unsigned max , const GeoHash& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() )
+ : GeoAccumulator( g , filter ) , _max( max ) , _near( n ), _maxDistance( maxDistance ) {
}
virtual bool checkDistance( const GeoHash& h , double& d ){
d = _g->distance( _near , h );
- bool good = _points.size() < _max || d < farthest();
+ bool good = d < _maxDistance && ( _points.size() < _max || d < farthest() );
GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near << "\t" << h << "\t" << d
<< " ok: " << good << " farthest: " << farthest() );
return good;
@@ -926,6 +926,7 @@ namespace mongo {
unsigned _max;
GeoHash _near;
Holder _points;
+ double _maxDistance;
};
@@ -999,10 +1000,10 @@ namespace mongo {
class GeoSearch {
public:
- GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() )
+ GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() )
: _spec( g ) , _n( n ) , _start( n ) ,
- _numWanted( numWanted ) , _filter( filter ) ,
- _hopper( new GeoHopper( g , numWanted , n , filter ) )
+ _numWanted( numWanted ) , _filter( filter ) , _maxDistance( maxDistance ) ,
+ _hopper( new GeoHopper( g , numWanted , n , filter , maxDistance ) )
{
assert( g->getDetails() );
_nscanned = 0;
@@ -1042,6 +1043,10 @@ namespace mongo {
if ( ! _prefix.constrains() )
break;
_prefix = _prefix.up();
+
+ double temp = _spec->distance( _prefix , _start );
+ if ( temp > ( _maxDistance * 2 ) )
+ break;
}
}
GEODEBUG( "done part 1" );
@@ -1105,6 +1110,7 @@ namespace mongo {
GeoHash _prefix;
int _numWanted;
BSONObj _filter;
+ double _maxDistance;
shared_ptr<GeoHopper> _hopper;
long long _nscanned;
@@ -1478,7 +1484,16 @@ namespace mongo {
switch ( e.embeddedObject().firstElement().getGtLtOp() ){
case BSONObj::opNEAR: {
e = e.embeddedObject().firstElement();
- shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query ) );
+ double maxDistance = numeric_limits<double>::max();
+ if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ){
+ BSONObjIterator i(e.embeddedObject());
+ i.next();
+ i.next();
+ BSONElement e = i.next();
+ if ( e.isNumber() )
+ maxDistance = e.numberDouble();
+ }
+ shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query , maxDistance ) );
s->exec();
auto_ptr<Cursor> c;
c.reset( new GeoSearchCursor( s ) );
@@ -1568,7 +1583,11 @@ namespace mongo {
if ( cmdObj["query"].type() == Object )
filter = cmdObj["query"].embeddedObject();
- GeoSearch gs( g , n , numWanted , filter );
+ double maxDistance = numeric_limits<double>::max();
+ if ( cmdObj["maxDistance"].isNumber() )
+ maxDistance = cmdObj["maxDistance"].number();
+
+ GeoSearch gs( g , n , numWanted , filter , maxDistance );
if ( cmdObj["start"].type() == String){
GeoHash start = (string) cmdObj["start"].valuestr();
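
The db/index_geo2d.cpp changes thread an optional maxDistance through GeoHopper/GeoSearch: $near now accepts a third array element, [ x , y , maxDistance ], and the geoNear command gains a maxDistance field; points beyond the limit fail checkDistance(), and prefix expansion stops once the search box is more than twice the limit from the start point. A sketch of just the query-side parsing, with a std::vector<double> standing in for the BSON array:

    #include <cassert>
    #include <limits>
    #include <vector>

    // Sketch of the $near parsing added above: a missing third element
    // means "no distance limit".
    double parseMaxDistance( const std::vector<double>& nearSpec ) {
        double maxDistance = std::numeric_limits<double>::max();
        if ( nearSpec.size() > 2 )
            maxDistance = nearSpec[2];
        return maxDistance;
    }

    int main() {
        std::vector<double> plain;                 // { $near : [ 50 , 50 ] }
        plain.push_back( 50 ); plain.push_back( 50 );
        assert( parseMaxDistance( plain ) == std::numeric_limits<double>::max() );

        std::vector<double> capped( plain );       // { $near : [ 50 , 50 , 3 ] }
        capped.push_back( 3 );
        assert( parseMaxDistance( capped ) == 3 );
    }

This mirrors the new jstests/geo2.js assertions further down, where [ 50 , 50 , 3 ] returns fewer results than an uncapped [ 50 , 50 ].
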
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 9fefcfa..53042e7 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -72,8 +72,14 @@ namespace mongo {
LastError * LastErrorHolder::_get( bool create ){
int id = _id.get();
- if ( id == 0 )
- return _tl.get();
+ if ( id == 0 ){
+ LastError * le = _tl.get();
+ if ( ! le && create ){
+ le = new LastError();
+ _tl.reset( le );
+ }
+ return le;
+ }
scoped_lock lock(_idsmutex);
map<int,Status>::iterator i = _ids.find( id );
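
The db/lasterror.cpp change makes the no-connection-id path of LastErrorHolder::_get() lazily allocate a thread-local LastError when create is requested, instead of returning a null pointer when nothing has been set for the thread. A sketch of that lazy-creation pattern, assuming Boost.Thread (already used by this tree) for the thread-specific storage:

    #include <boost/thread/tss.hpp>
    #include <cassert>

    struct LastError {
        int nPrev;
        LastError() : nPrev( 0 ) {}
    };

    boost::thread_specific_ptr<LastError> tl;   // stands in for _tl above

    // Sketch of the create-on-demand branch added to _get().
    LastError * getLastError( bool create ) {
        LastError * le = tl.get();
        if ( ! le && create ) {
            le = new LastError();
            tl.reset( le );   // thread_specific_ptr owns it and deletes it at thread exit
        }
        return le;
    }

    int main() {
        assert( getLastError( false ) == 0 );              // nothing yet, creation not requested
        LastError * le = getLastError( true );             // lazily created
        assert( le != 0 && getLastError( false ) == le );  // same object on later calls
    }
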
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 1c4608c..e46ffb7 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -45,6 +45,8 @@ _ disallow system* manipulations from the database.
namespace mongo {
+ const int MaxExtentSize = 0x7ff00000;
+
map<string, unsigned> BackgroundOperation::dbsInProg;
set<string> BackgroundOperation::nsInProg;
@@ -357,7 +359,7 @@ namespace mongo {
Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
massert( 10357 , "shutdown in progress", !goingAway );
- massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= 0x7ff00000 );
+ massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= MaxExtentSize );
massert( 10359 , "header==0 on new extent: 32 bit mmap space exceeded?", header ); // null if file open failed
int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
DiskLoc loc;
@@ -919,11 +921,19 @@ namespace mongo {
}
int followupExtentSize(int len, int lastExtentLen) {
+ assert( len < MaxExtentSize );
int x = initialExtentSize(len);
int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
int sz = y > x ? y : x;
+
+ if ( sz < lastExtentLen )
+ sz = lastExtentLen;
+ else if ( sz > MaxExtentSize )
+ sz = MaxExtentSize;
+
sz = ((int)sz) & 0xffffff00;
assert( sz > len );
+
return sz;
}
@@ -1141,7 +1151,7 @@ namespace mongo {
break;
}
}
- progress.done();
+ progress.finished();
return n;
}
@@ -1192,7 +1202,7 @@ namespace mongo {
// throws DBException
static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
- log() << "building new index on " << idx.keyPattern() << " for " << ns << endl;
+ log() << "building new index on " << idx.keyPattern() << " for " << ns << ( background ? " background" : "" ) << endl;
Timer t;
unsigned long long n;
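
The db/pdfile.cpp hunk bounds followupExtentSize(): extents grow 4x while small and 1.2x once past 4 MB, the result never shrinks below the previous extent, never exceeds MaxExtentSize (0x7ff00000), and is rounded down to a 256-byte multiple. A standalone sketch of that arithmetic, with a stub in place of the real initialExtentSize():

    #include <cassert>
    #include <iostream>

    const int MaxExtentSize = 0x7ff00000;

    // Stand-in for the real initialExtentSize(), which sizes the first
    // extent from the record length plus headroom.
    int initialExtentSizeStub( int len ) {
        return len * 16;
    }

    // Sketch of the clamping added to followupExtentSize() above.
    int followupExtentSize( int len , int lastExtentLen ) {
        assert( len < MaxExtentSize );
        int x = initialExtentSizeStub( len );
        // grow 4x while extents are small, 1.2x once they are large
        int y = (int) ( lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2 );
        int sz = y > x ? y : x;

        if ( sz < lastExtentLen )       // never shrink below the previous extent
            sz = lastExtentLen;
        else if ( sz > MaxExtentSize )  // never exceed the per-extent ceiling
            sz = MaxExtentSize;

        sz = ((int)sz) & 0xffffff00;    // round down to a 256-byte multiple
        assert( sz > len );
        return sz;
    }

    int main() {
        std::cout << followupExtentSize( 1000 , 1000000 )   << std::endl; // 4x growth
        std::cout << followupExtentSize( 1000 , 100000000 ) << std::endl; // 1.2x growth
    }
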
diff --git a/db/repl.cpp b/db/repl.cpp
index 62b2986..137c25f 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -102,7 +102,7 @@ namespace mongo {
return;
info = _comment;
if ( n != state && !cmdLine.quiet )
- log() << "pair: setting master=" << n << " was " << state << '\n';
+ log() << "pair: setting master=" << n << " was " << state << endl;
state = n;
}
@@ -732,7 +732,7 @@ namespace mongo {
( replPair && replSettings.fastsync ) ) {
DBDirectClient c;
if ( c.exists( "local.oplog.$main" ) ) {
- BSONObj op = c.findOne( "local.oplog.$main", Query().sort( BSON( "$natural" << -1 ) ) );
+ BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
if ( !op.isEmpty() ) {
tmp.syncedTo = op[ "ts" ].date();
tmp._lastSavedLocalTs = op[ "ts" ].date();
@@ -938,6 +938,7 @@ namespace mongo {
}
Client::Context ctx( ns );
+ ctx.getClient()->curop()->reset();
bool empty = ctx.db()->isEmpty();
bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
@@ -1606,6 +1607,7 @@ namespace mongo {
ReplInfo r("replMain load sources");
dblock lk;
ReplSource::loadAll(sources);
+ replSettings.fastsync = false; // only need this param for initial reset
}
if ( sources.empty() ) {
@@ -1860,6 +1862,9 @@ namespace mongo {
createOplog();
boost::thread t(replMasterThread);
}
+
+ while( replSettings.fastsync ) // don't allow writes until we've set up from log
+ sleepmillis( 50 );
}
/* called from main at server startup */
diff --git a/db/repl.h b/db/repl.h
index c5e0f63..a42fa8e 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -205,7 +205,10 @@ namespace mongo {
public:
MemIds() : size_() {}
friend class IdTracker;
- void reset() { imp_.clear(); }
+ void reset() {
+ imp_.clear();
+ size_ = 0;
+ }
bool get( const char *ns, const BSONObj &id ) { return imp_[ ns ].count( id ); }
void set( const char *ns, const BSONObj &id, bool val ) {
if ( val ) {
diff --git a/db/update.cpp b/db/update.cpp
index d6a5c5e..7049fff 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -24,11 +24,11 @@
#include "repl.h"
#include "update.h"
+//#define DEBUGUPDATE(x) cout << x << endl;
+#define DEBUGUPDATE(x)
+
namespace mongo {
- //#define DEBUGUPDATE(x) cout << x << endl;
-#define DEBUGUPDATE(x)
-
const char* Mod::modNames[] = { "$inc", "$set", "$push", "$pushAll", "$pull", "$pullAll" , "$pop", "$unset" ,
"$bitand" , "$bitor" , "$bit" , "$addToSet" };
unsigned Mod::modNamesNum = sizeof(Mod::modNames)/sizeof(char*);
@@ -310,11 +310,12 @@ namespace mongo {
// Perform this check first, so that we don't leave a partially modified object on uassert.
for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); ++i ) {
+ DEBUGUPDATE( "\t\t prepare : " << i->first );
ModState& ms = mss->_mods[i->first];
const Mod& m = i->second;
BSONElement e = obj.getFieldDotted(m.fieldName);
-
+
ms.m = &m;
ms.old = e;
@@ -406,6 +407,7 @@ namespace mongo {
mss->amIInPlacePossible( false );
}
}
+
return auto_ptr<ModSetState>( mss );
}
@@ -424,7 +426,7 @@ namespace mongo {
// [dm] the BSONElementManipulator statements below are for replication (correct?)
case Mod::INC:
m.m->incrementMe( m.old );
- m.fixedName = "$set";
+ m.fixedOpName = "$set";
m.fixed = &(m.old);
break;
case Mod::SET:
@@ -477,6 +479,7 @@ namespace mongo {
template< class Builder >
void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ){
+ DEBUGUPDATE( "\t\t createNewFromMods root: " << root );
BSONObjIteratorSorted es( obj );
BSONElement e = es.next();
@@ -488,6 +491,8 @@ namespace mongo {
while ( e.type() && m != mend ){
string field = root + e.fieldName();
FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
+
+ DEBUGUPDATE( "\t\t\t" << field << "\t" << m->second.m->fieldName << "\t" << cmp );
switch ( cmp ){
@@ -809,11 +814,13 @@ namespace mongo {
const BSONObj& onDisk = loc.obj();
ModSet * useMods = mods.get();
+ bool forceRewrite = false;
auto_ptr<ModSet> mymodset;
if ( u->getMatchDetails().elemMatchKey && mods->hasDynamicArray() ){
useMods = mods->fixDynamicArray( u->getMatchDetails().elemMatchKey );
mymodset.reset( useMods );
+ forceRewrite = true;
}
@@ -850,7 +857,7 @@ namespace mongo {
pattern = patternBuilder.obj();
}
- if ( mss->needOpLogRewrite() ){
+ if ( forceRewrite || mss->needOpLogRewrite() ){
DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
logOp("u", ns, mss->getOpLogRewrite() , &pattern );
}
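
The db/update.cpp change sets forceRewrite whenever the update used the positional "$" operator against a dynamic array: fixDynamicArray() rewrites the mods with the concrete element index, and the oplog entry is then always rewritten so a slave replays the resolved path rather than re-evaluating "$". A sketch of the field-name resolution that rewrite relies on, with a plain string substitution standing in for the real ModSet machinery:

    #include <cassert>
    #include <string>

    // Illustrative only: replace the positional "$" in a modified field
    // name with the index of the array element that actually matched.
    std::string resolvePositional( const std::string& fieldName , const std::string& elemMatchKey ) {
        std::string out = fieldName;
        std::string::size_type pos = out.find( ".$" );
        if ( pos != std::string::npos )
            out.replace( pos + 1 , 1 , elemMatchKey );
        return out;
    }

    int main() {
        // update( { "a.n" : "b" } , { $inc : { "a.$.c" : 1 } } ) where a[1] matched
        assert( resolvePositional( "a.$.c" , "1" ) == "a.1.c" );
        assert( resolvePositional( "b.$" , "2" ) == "b.2" );
    }
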
diff --git a/db/update.h b/db/update.h
index e14b0fb..5d20114 100644
--- a/db/update.h
+++ b/db/update.h
@@ -327,7 +327,7 @@ namespace mongo {
const Mod * m;
BSONElement old;
- const char * fixedName;
+ const char * fixedOpName;
BSONElement * fixed;
int pushStartSize;
@@ -337,7 +337,7 @@ namespace mongo {
long long inclong;
ModState(){
- fixedName = 0;
+ fixedOpName = 0;
fixed = 0;
pushStartSize = -1;
incType = EOO;
@@ -352,7 +352,7 @@ namespace mongo {
}
bool needOpLogRewrite() const {
- if ( fixed || fixedName || incType )
+ if ( fixed || fixedOpName || incType )
return true;
switch( op() ){
@@ -374,13 +374,13 @@ namespace mongo {
return;
}
- const char * name = fixedName ? fixedName : Mod::modNames[op()];
+ const char * name = fixedOpName ? fixedOpName : Mod::modNames[op()];
BSONObjBuilder bb( b.subobjStart( name ) );
if ( fixed )
bb.appendAs( *fixed , m->fieldName );
else
- bb.append( m->elt );
+ bb.appendAs( m->elt , m->fieldName );
bb.done();
}
@@ -470,7 +470,7 @@ namespace mongo {
break;
case Mod::INC:
- ms.fixedName = "$set";
+ ms.fixedOpName = "$set";
case Mod::SET: {
m._checkForAppending( m.elt );
b.appendAs( m.elt, m.shortFieldName );
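
The db/update.h changes rename fixedName to fixedOpName and use it so that an applied $inc is written to the oplog as a $set of the post-increment value, and the appendAs() change logs the element under the mod's field name rather than the element's own name. Logging the result instead of the delta keeps the replayed operation idempotent on slaves. A toy illustration of that rewrite, modeling a document as a map<string,double>:

    #include <cassert>
    #include <map>
    #include <string>

    typedef std::map<std::string,double> Doc;

    struct LoggedOp {          // what ends up in the oplog for this mod
        std::string op;        // "$set" when an $inc was rewritten
        std::string field;
        double value;
    };

    // Apply an $inc locally but log it as a $set of the new value,
    // mirroring the fixedOpName = "$set" assignments above.
    LoggedOp applyInc( Doc& d , const std::string& field , double delta ) {
        d[field] += delta;
        LoggedOp logged = { "$set" , field , d[field] };
        return logged;
    }

    int main() {
        Doc d; d["x"] = 5;
        LoggedOp op = applyInc( d , "x" , 1 );
        assert( op.op == "$set" && op.value == 6 );
        d[op.field] = op.value;    // replaying the $set a second time
        assert( d["x"] == 6 );     // is a no-op, i.e. idempotent
    }
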
diff --git a/dbtests/basictests.cpp b/dbtests/basictests.cpp
index eaadf75..4c80cd4 100644
--- a/dbtests/basictests.cpp
+++ b/dbtests/basictests.cpp
@@ -306,11 +306,14 @@ namespace BasicTests {
ASSERT_EQUALS( 1, lexNumCmp( "f12g", "f12f" ) );
ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aab" ) );
ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aa1" ) );
- ASSERT_EQUALS( 1, lexNumCmp( "a1{", "a11" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a11" ) );
ASSERT_EQUALS( 1, lexNumCmp( "a1{a", "a1{" ) );
ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a1{a" ) );
ASSERT_EQUALS( 1, lexNumCmp("21", "11") );
ASSERT_EQUALS( -1, lexNumCmp("11", "21") );
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.0" , "a.1" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.0.b" , "a.1" ) );
}
};
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index 3c9dc8d..390071f 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -72,6 +72,9 @@ namespace BtreeTests {
bt()->assertValid( order(), true );
ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order() ) );
}
+ void dump() {
+ bt()->dumpTree( dl(), order() );
+ }
void insert( BSONObj &key ) {
bt()->bt_insert( dl(), recordLoc(), key, order(), true, id(), true );
}
@@ -206,10 +209,12 @@ namespace BtreeTests {
class MissingLocateMultiBucket : public Base {
public:
void run() {
- for ( int i = 0; i < 10; ++i ) {
- BSONObj k = key( 'b' + 2 * i );
- insert( k );
+ for ( int i = 0; i < 8; ++i ) {
+ insert( i );
}
+ insert( 9 );
+ insert( 8 );
+// dump();
BSONObj straddle = key( 'i' );
locate( straddle, 0, false, dl(), 1 );
straddle = key( 'k' );
@@ -219,8 +224,34 @@ namespace BtreeTests {
BSONObj key( char c ) {
return simpleKey( c, 800 );
}
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
};
+ class SERVER983 : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ BSONObj straddle = key( 'o' );
+ locate( straddle, 0, false, dl(), 1 );
+ straddle = key( 'q' );
+ locate( straddle, 0, false, dl(), -1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
class All : public Suite {
public:
All() : Suite( "btree" ){
@@ -233,6 +264,7 @@ namespace BtreeTests {
add< SplitLeftHeavyBucket >();
add< MissingLocate >();
add< MissingLocateMultiBucket >();
+ add< SERVER983 >();
}
} myall;
}
diff --git a/dbtests/framework.cpp b/dbtests/framework.cpp
index 4553686..0566aa8 100644
--- a/dbtests/framework.cpp
+++ b/dbtests/framework.cpp
@@ -34,6 +34,8 @@ namespace po = boost::program_options;
namespace mongo {
+ CmdLine cmdLine;
+
namespace regression {
map<string,Suite*> * mongo::regression::Suite::_suites = 0;
diff --git a/dbtests/repltests.cpp b/dbtests/repltests.cpp
index c6ef6c2..53e3609 100644
--- a/dbtests/repltests.cpp
+++ b/dbtests/repltests.cpp
@@ -1053,7 +1053,8 @@ namespace ReplTests {
check();
ASSERT( !s_.inMem() );
- s_.reset();
+ s_.reset( 4 * sizeof( BSONObj ) - 1 );
+ s_.mayUpgradeStorage();
ASSERT( s_.inMem() );
}
private:
diff --git a/debian/changelog b/debian/changelog
index d99fb63..8dff07d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,17 @@
+mongodb (1.4.2) unstable; urgency=low
+
+ * bug fixes
+
+ -- Richard Kreuter <richard@10gen.com> Tue, 27 Apr 2010 16:56:28 -0500
+
+
+mongodb (1.4.1) unstable; urgency=low
+
+ * bug fixes
+
+ -- Richard Kreuter <richard@10gen.com> Wed, 14 Apr 2010 16:56:28 -0500
+
+
mongodb (1.4.0) unstable; urgency=low
* stable release
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..ca6f9b7
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..c2d5362
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,37 @@
+#!/bin/sh
+# preinst script for mongodb
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/doxygenConfig b/doxygenConfig
index dacf258..37ca7f2 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.4.0
+PROJECT_NUMBER = 1.4.2
OUTPUT_DIRECTORY = docs
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/geo2.js b/jstests/geo2.js
index b9452c8..6b1a1a2 100644
--- a/jstests/geo2.js
+++ b/jstests/geo2.js
@@ -41,3 +41,8 @@ assert.close( fast.stats.avgDistance , a( t.find( { loc : { $near : [ 50 , 50 ]
printjson( t.find( { loc : { $near : [ 50 , 50 ] } } ).explain() )
+assert.lt( 3 , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(50) ) , "C1" )
+assert.gt( 3 , a( t.find( { loc : { $near : [ 50 , 50 , 3 ] } } ).limit(50) ) , "C2" )
+
+
+
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index e0acf5c..594ba07 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -7,6 +7,11 @@ var rt = new ReplTest( "basic1" );
m = rt.start( true );
s = rt.start( false );
+function block(){
+ am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
+ sleep(3000); // 1.4 branch doesn't support w
+}
+
function hash( db ){
var s = "";
var a = db.getCollectionNames();
@@ -90,13 +95,38 @@ checkMR( am.mr );
checkMR( as.mr );
checkNumCollections( "MR2" );
-sleep( 3000 );
+block();
checkNumCollections( "MR3" );
var res = am.mr.mapReduce( m , r , { out : "xyz" } );
-sleep( 3000 );
+block();
+
checkNumCollections( "MR4" );
+
+t = am.rpos;
+t.insert( { _id : 1 , a : [ { n : "a" , c : 1 } , { n : "b" , c : 1 } , { n : "c" , c : 1 } ] , b : [ 1 , 2 , 3 ] } )
+block();
+check( "after pos 1 " );
+
+t.update( { "a.n" : "b" } , { $inc : { "a.$.c" : 1 } } )
+block();
+check( "after pos 2 " );
+
+t.update( { "b" : 2 } , { $inc : { "b.$" : 1 } } )
+block();
+check( "after pos 3 " );
+
+t.update( { "b" : 3} , { $set : { "b.$" : 17 } } )
+block();
+check( "after pos 4 " );
+
+
+printjson( am.rpos.findOne() )
+printjson( as.rpos.findOne() )
+
+//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 } ).forEach( printjson )
+
rt.stop();
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
index 5380bbf..296ebd0 100644
--- a/jstests/repl/snapshot3.js
+++ b/jstests/repl/snapshot3.js
@@ -2,7 +2,7 @@
ports = allocatePorts( 3 );
-var baseName = "repl_snapshot2";
+var baseName = "repl_snapshot3";
var basePath = "/data/db/" + baseName;
a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
diff --git a/jstests/update_arraymatch2.js b/jstests/update_arraymatch2.js
index 7eb810b..c07a61c 100644
--- a/jstests/update_arraymatch2.js
+++ b/jstests/update_arraymatch2.js
@@ -1,4 +1,4 @@
-t = db.tilde;
+t = db.update_arraymatch2;
t.drop();
t.insert( { } );
diff --git a/jstests/updated.js b/jstests/updated.js
new file mode 100644
index 0000000..c4c11be
--- /dev/null
+++ b/jstests/updated.js
@@ -0,0 +1,20 @@
+
+t = db.updated;
+t.drop()
+
+o = { _id : Math.random() ,
+ items:[null,null,null,null]
+ };
+
+t.insert( o );
+assert.eq( o , t.findOne() , "A1" );
+
+o.items[0] = {amount:9000,itemId:1};
+t.update({},{$set:{"items.0":o.items[0]}});
+assert.eq( o , t.findOne() , "A2" );
+
+o.items[0].amount += 1000;
+o.items[1] = {amount:1,itemId:2};
+t.update({},{$inc:{"items.0.amount":1000},$set:{"items.1":o.items[1]}});
+assert.eq( o , t.findOne() , "A3" );
+
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index eac1cff..526cf4b 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.4.0
+Version: 1.4.2
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/server.cpp b/s/server.cpp
index 3644376..6141816 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -28,7 +28,8 @@
#include "chunk.h"
namespace mongo {
-
+
+ CmdLine cmdLine;
Database *database = 0;
string mongosCommand;
string ourHostname;
diff --git a/scripting/engine_spidermonkey.h b/scripting/engine_spidermonkey.h
index a39d8fb..4e420de 100644
--- a/scripting/engine_spidermonkey.h
+++ b/scripting/engine_spidermonkey.h
@@ -42,6 +42,7 @@
#endif
#include "jsapi.h"
+#include "jsobj.h"
#include "jsdate.h"
#include "jsregexp.h"
diff --git a/stdafx.cpp b/stdafx.cpp
index 0a80de6..09f202a 100644
--- a/stdafx.cpp
+++ b/stdafx.cpp
@@ -32,6 +32,6 @@
namespace mongo {
- const char versionString[] = "1.4.0";
+ const char versionString[] = "1.4.2";
} // namespace mongo
diff --git a/tools/tool.cpp b/tools/tool.cpp
index c92a0c4..c9a2977 100644
--- a/tools/tool.cpp
+++ b/tools/tool.cpp
@@ -32,6 +32,8 @@ namespace po = boost::program_options;
namespace mongo {
+ CmdLine cmdLine;
+
Tool::Tool( string name , bool localDBAllowed , string defaultDB , string defaultCollection ) :
_name( name ) , _db( defaultDB ) , _coll( defaultCollection ) , _conn(0), _paired(false) {
@@ -157,6 +159,7 @@ namespace mongo {
if ( _params.count( "directoryperdb" ) ) {
directoryperdb = true;
}
+ assert( lastError.get( true ) );
Client::initThread("tools");
_conn = new DBDirectClient();
_host = "DIRECT";
diff --git a/util/assert_util.cpp b/util/assert_util.cpp
index 8c8477a..b4659cc 100644
--- a/util/assert_util.cpp
+++ b/util/assert_util.cpp
@@ -143,6 +143,7 @@ namespace mongo {
dbexit( EXIT_BADOPTIONS );
assert( 0 );
}
+ fclose( test );
_path = lp;
_enabled = 1;
diff --git a/util/goodies.h b/util/goodies.h
index 4641941..cd5423b 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -195,6 +195,8 @@ namespace mongo {
boost::thread::sleep(xt);
}
inline void sleepmicros(int s) {
+ if ( s <= 0 )
+ return;
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += ( s / 1000000 );
@@ -215,6 +217,8 @@ namespace mongo {
}
}
inline void sleepmicros(int s) {
+ if ( s <= 0 )
+ return;
struct timespec t;
t.tv_sec = (int)(s / 1000000);
t.tv_nsec = s % 1000000;
@@ -650,43 +654,54 @@ namespace mongo {
// for convenience, '{' is greater than anything and stops number parsing
inline int lexNumCmp( const char *s1, const char *s2 ) {
- int nret = 0;
while( *s1 && *s2 ) {
+
bool p1 = ( *s1 == '{' );
bool p2 = ( *s2 == '{' );
if ( p1 && !p2 )
return 1;
if ( p2 && !p1 )
return -1;
+
bool n1 = isNumber( *s1 );
bool n2 = isNumber( *s2 );
+
if ( n1 && n2 ) {
- if ( nret == 0 ) {
- nret = *s1 > *s2 ? 1 : ( *s1 == *s2 ? 0 : -1 );
- }
- } else if ( n1 ) {
- return 1;
- } else if ( n2 ) {
- return -1;
- } else {
- if ( nret ) {
- return nret;
- }
- if ( *s1 > *s2 ) {
+ char * e1;
+ char * e2;
+ long l1 = strtol( s1 , &e1 , 10 );
+ long l2 = strtol( s2 , &e2 , 10 );
+
+ if ( l1 > l2 )
return 1;
- } else if ( *s2 > *s1 ) {
+ else if ( l1 < l2 )
return -1;
- }
- nret = 0;
- }
- ++s1; ++s2;
+
+ s1 = e1;
+ s2 = e2;
+ continue;
+ }
+
+ if ( n1 )
+ return 1;
+
+ if ( n2 )
+ return -1;
+
+ if ( *s1 > *s2 )
+ return 1;
+
+ if ( *s2 > *s1 )
+ return -1;
+
+ s1++; s2++;
}
- if ( *s1 ) {
+
+ if ( *s1 )
return 1;
- } else if ( *s2 ) {
+ if ( *s2 )
return -1;
- }
- return nret;
+ return 0;
}
} // namespace mongo
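
The util/goodies.h rewrite compares embedded numbers as whole numeric runs via strtol() instead of digit by digit with a deferred tie-break, which is what flips the "a1{" vs "a11" expectation in dbtests/basictests.cpp and makes dotted paths such as "a.0.b" sort before "a.1". A trimmed-down copy of the new function, with isdigit() standing in for the tree's isNumber(), so those test cases can be checked in isolation:

    #include <cassert>
    #include <cctype>
    #include <cstdlib>

    inline bool isNumber( char c ) { return isdigit( (unsigned char)c ) != 0; }

    // '{' is greater than anything and stops number parsing, as above.
    int lexNumCmp( const char *s1 , const char *s2 ) {
        while ( *s1 && *s2 ) {
            bool p1 = ( *s1 == '{' );
            bool p2 = ( *s2 == '{' );
            if ( p1 && !p2 ) return 1;
            if ( p2 && !p1 ) return -1;

            bool n1 = isNumber( *s1 );
            bool n2 = isNumber( *s2 );
            if ( n1 && n2 ) {                       // compare whole numeric runs
                char *e1, *e2;
                long l1 = strtol( s1 , &e1 , 10 );
                long l2 = strtol( s2 , &e2 , 10 );
                if ( l1 != l2 ) return l1 > l2 ? 1 : -1;
                s1 = e1; s2 = e2;                   // skip past both runs
                continue;
            }
            if ( n1 ) return 1;                     // numbers sort after non-numbers
            if ( n2 ) return -1;
            if ( *s1 != *s2 ) return *s1 > *s2 ? 1 : -1;
            s1++; s2++;
        }
        if ( *s1 ) return 1;
        if ( *s2 ) return -1;
        return 0;
    }

    int main() {
        assert( lexNumCmp( "a.0" , "a.1" )   == -1 );
        assert( lexNumCmp( "a.0.b" , "a.1" ) == -1 );  // previously mis-ordered
        assert( lexNumCmp( "21" , "11" )     ==  1 );
        assert( lexNumCmp( "a1{" , "a11" )   == -1 );  // expectation flipped in basictests.cpp
    }
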