author     Antonin Kral <a.kral@bobek.cz>   2010-05-30 10:45:58 +0200
committer  Antonin Kral <a.kral@bobek.cz>   2010-05-30 10:45:58 +0200
commit     4a81fe000acddefe863a1719504034833d9840e5 (patch)
tree       30ff6616116f99e5fd91d90bd3fb51ce5815a312
parent     3b11d683965c9336db1b0ea58c7d9906d9302a7b (diff)
download   mongodb-4a81fe000acddefe863a1719504034833d9840e5.tar.gz

Imported Upstream version 1.4.3
 client/connpool.cpp                     |   8
 client/connpool.h                       |   4
 client/dbclient.cpp                     |   2
 client/dbclient.h                       |   2
 db/db.cpp                               |   2
 db/dbcommands.cpp                       |   5
 db/index.cpp                            |  46
 db/pdfile.cpp                           |   5
 db/repl.cpp                             |   2
 db/repl.h                               |  22
 db/update.cpp                           |  13
 db/update.h                             |  48
 dbtests/querytests.cpp                  |  39
 debian/changelog                        |   6
 debian/files                            |   1
 debian/mongodb.upstart                  |  15
 debian/preinst                          |  37
 doxygenConfig                           |   2
 jstests/clone/clonecollection.js        |  39
 jstests/index_arr1.js                   |  23
 jstests/index_arr2.js                   |  51
 jstests/update_multi3.js                |  25
 lib/libboost_thread-gcc41-mt-d-1_34_1.a | bin 0 -> 692920 bytes
 rpm/mongo.spec                          |   2
 stdafx.cpp                              |   2
 util/mmap.h                             |   1
 util/mmap_posix.cpp                     |   1
 util/mmap_win.cpp                       |  11
 util/ntservice.cpp                      |  20
29 files changed, 362 insertions(+), 72 deletions(-)
diff --git a/client/connpool.cpp b/client/connpool.cpp
index d69c787..5a08483 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -115,9 +115,11 @@ namespace mongo {
}
ScopedDbConnection::~ScopedDbConnection() {
- if ( _conn && ! _conn->isFailed() ) {
- /* see done() comments above for why we log this line */
- log() << "~ScopedDBConnection: _conn != null" << endl;
+ if ( _conn ){
+ if ( ! _conn->isFailed() ) {
+ /* see done() comments above for why we log this line */
+ log() << "~ScopedDBConnection: _conn != null" << endl;
+ }
kill();
}
}
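
[note] Before this change the destructor called kill() only for healthy connections, so a ScopedDbConnection destroyed while still holding a failed connection never released it. A minimal RAII sketch of the corrected shape (Conn/ScopedConn are hypothetical stand-ins, not the mongo classes):

    #include <iostream>

    struct Conn { bool failed = false; };

    class ScopedConn {
        Conn* _conn;
    public:
        explicit ScopedConn(Conn* c) : _conn(c) {}
        void kill() { delete _conn; _conn = nullptr; }
        ~ScopedConn() {
            if (_conn) {
                if (!_conn->failed)
                    std::cerr << "~ScopedConn: _conn != null\n"; // caller forgot done()
                kill();  // now runs for failed connections too, closing the leak
            }
        }
    };

    int main() {
        Conn* c = new Conn{true};   // a connection that has failed
        { ScopedConn s(c); }        // destructor still frees it
    }
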
diff --git a/client/connpool.h b/client/connpool.h
index 5a47b01..b44ff51 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -61,8 +61,10 @@ namespace mongo {
void flush();
DBClientBase *get(const string& host);
void release(const string& host, DBClientBase *c) {
- if ( c->isFailed() )
+ if ( c->isFailed() ){
+ delete c;
return;
+ }
scoped_lock L(poolMutex);
pools[host]->pool.push(c);
}
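
[note] release() had the complementary leak: a failed connection hit the early return and was neither pooled nor freed. The fix deletes it on the spot, so a broken socket is never recycled to the next caller. Sketch with a stand-in pool:

    #include <stack>

    struct Conn { bool failed = false; };

    struct Pool {
        std::stack<Conn*> ready;
        void release(Conn* c) {
            if (c->failed) {
                delete c;      // destroy failed connections instead of leaking them
                return;        // and never hand them out again
            }
            ready.push(c);     // healthy connections go back to the pool
        }
    };

    int main() {
        Pool p;
        p.release(new Conn{false});  // pooled
        p.release(new Conn{true});   // deleted
    }
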
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 7d04866..f617f7c 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -106,7 +106,7 @@ namespace mongo {
/* --- dbclientcommands --- */
bool DBClientWithCommands::isOk(const BSONObj& o) {
- return o.getIntField("ok") == 1;
+ return o["ok"].trueValue();
}
inline bool DBClientWithCommands::runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options) {
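
[note] The server may answer { ok: 1 }, { ok: 1.0 } or { ok: true }; trueValue() accepts any truthy encoding, where the strict getIntField(...) == 1 comparison could miss some of them. A toy tagged value illustrating the semantics (Elem is hypothetical, not the real BSONElement):

    #include <cassert>

    struct Elem {
        enum Type { Int, Double, Bool } type;
        double v;
        bool trueValue() const { return v != 0; }   // numeric or boolean truthiness
    };

    int main() {
        assert((Elem{Elem::Double, 1.0}.trueValue()));  // { ok: 1.0 }
        assert((Elem{Elem::Bool,   1.0}.trueValue()));  // { ok: true }
        assert((!Elem{Elem::Int,   0.0}.trueValue()));  // { ok: 0 } -> failure
    }
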
diff --git a/client/dbclient.h b/client/dbclient.h
index ebd3b73..a2fad8e 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -919,7 +919,7 @@ namespace mongo {
bool isFailed() const {
// TODO: this really should check isFailed on current master as well
- return master > NotSetR;
+ return master < Left;
}
};
diff --git a/db/db.cpp b/db/db.cpp
index 9ff49ba..51369bb 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -429,6 +429,8 @@ namespace mongo {
class DataFileSync : public BackgroundJob {
public:
void run(){
+ if ( _sleepsecs > 2100 )
+ _sleepsecs = 2100;
log(1) << "will flush memory every: " << _sleepsecs << " seconds" << endl;
int time_flushing = 0;
while ( ! inShutdown() ){
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 46c2e4d..85c695d 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -317,9 +317,10 @@ namespace mongo {
bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
+ result.append("version", versionString);
result.append("uptime",(double) (time(0)-started));
result.appendDate( "localTime" , jsTime() );
-
+
{
BSONObjBuilder t;
@@ -339,6 +340,8 @@ namespace mongo {
BSONObjBuilder t( result.subobjStart( "mem" ) );
+ t.append("bits", ( sizeof(int*) == 4 ? 32 : 64 ) );
+
ProcessInfo p;
if ( p.supported() ){
t.appendNumber( "resident" , p.getResidentSize() );
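
[note] serverStatus now reports the server version string and whether the build is 32- or 64-bit (derived from sizeof(int*)). A sketch of reading the new fields through the C++ driver of this era, using the runCommand signature shown above; the host and error handling are assumptions:

    #include "client/dbclient.h"
    #include <iostream>

    int main() {
        mongo::DBClientConnection c;
        std::string err;
        if (!c.connect("localhost", err)) {          // assumed local mongod
            std::cout << "connect failed: " << err << std::endl;
            return 1;
        }
        mongo::BSONObj info;
        c.runCommand("admin", BSON("serverStatus" << 1), info);
        std::cout << "version: " << info["version"].str() << std::endl
                  << "bits: "
                  << info["mem"].embeddedObject()["bits"].numberInt() << std::endl;
        return 0;
    }
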
diff --git a/db/index.cpp b/db/index.cpp
index 5ec2658..6931d93 100644
--- a/db/index.cpp
+++ b/db/index.cpp
@@ -206,6 +206,8 @@ namespace mongo {
break;
}
}
+
+ bool insertArrayNull = false;
if ( allFound ) {
if ( arrElt.eoo() ) {
@@ -231,29 +233,45 @@ namespace mongo {
}
}
else if ( fixed.size() > 1 ){
- // x : [] - need to insert undefined
- BSONObjBuilder b(_sizeTracker);
- for( unsigned j = 0; j < fixed.size(); ++j ) {
- if ( j == arrIdx )
- b.appendUndefined( "" );
- else
- b.appendAs( fixed[ j ], "" );
- }
- keys.insert( b.obj() );
+ insertArrayNull = true;
}
}
} else {
// nonterminal array element to expand, so recurse
assert( !arrElt.eoo() );
BSONObjIterator i( arrElt.embeddedObject() );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() == Object )
- _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
+ if ( i.more() ){
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() == Object )
+ _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
+ }
+ }
+ else {
+ insertArrayNull = true;
}
}
- }
+
+ if ( insertArrayNull ){
+ // x : [] - need to insert undefined
+ BSONObjBuilder b(_sizeTracker);
+ for( unsigned j = 0; j < fixed.size(); ++j ) {
+ if ( j == arrIdx ){
+ b.appendUndefined( "" );
+ }
+ else {
+ BSONElement e = fixed[j];
+ if ( e.eoo() )
+ b.appendNull( "" );
+ else
+ b.appendAs( e , "" );
+ }
+ }
+ keys.insert( b.obj() );
+ }
+ }
+
/* Pull out the relevant key objects from obj, so we
can index them. Note that the set is multiple elements
only when it's a "multikey" array.
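
[note] This rework fixes key generation when an indexed array is empty (the SERVER-1082 case exercised by jstests/index_arr1.js below): instead of dropping the document from the index, one key is emitted with undefined in the array's slot, and null for any other component that is missing. A toy model of the placeholder logic, using optionals in place of BSON (the real code distinguishes undefined from null; this sketch flattens both to nullopt):

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    using Slot = std::optional<std::string>;

    std::vector<Slot> makeKey(const std::vector<Slot>& fixed, size_t arrIdx) {
        std::vector<Slot> key;
        for (size_t j = 0; j < fixed.size(); ++j) {
            if (j == arrIdx)
                key.push_back(std::nullopt);   // empty array -> "undefined" slot
            else
                key.push_back(fixed[j]);       // missing fixed field -> "null" slot
        }
        return key;
    }

    int main() {
        // { _id: 2, a: 5, b: [] } under index { a: 1, "b.x": 1 }
        // -> one key (5, undefined), so the doc stays reachable via the index
        auto key = makeKey({Slot("5"), std::nullopt}, 1);
        for (auto& s : key)
            std::cout << (s ? *s : std::string("undefined")) << " ";
        std::cout << "\n";   // prints: 5 undefined
    }
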
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index e46ffb7..80ae649 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -904,10 +904,9 @@ namespace mongo {
idx.head,
dl, *changes[x].added[i], idxKey, /*dupsAllowed*/true, idx);
}
- catch (AssertionException&) {
+ catch (AssertionException& e) {
ss << " exception update index ";
- out() << " caught assertion update index " << idx.indexNamespace() << '\n';
- problem() << " caught assertion update index " << idx.indexNamespace() << endl;
+ problem() << " caught assertion update index " << idx.indexNamespace() << " " << e << endl;
}
}
}
diff --git a/db/repl.cpp b/db/repl.cpp
index 137c25f..a0ac16e 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -1053,7 +1053,7 @@ namespace mongo {
BSONObj last = conn->findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
if ( !last.isEmpty() ) {
BSONElement ts = last.getField( "ts" );
- massert( 10386 , "non Date ts found", ts.type() == Date || ts.type() == Timestamp );
+ massert( 10386 , (string)"non Date ts found:" + last.jsonString() , ts.type() == Date || ts.type() == Timestamp );
syncedTo = OpTime( ts.date() );
}
}
diff --git a/db/repl.h b/db/repl.h
index a42fa8e..eb1cb26 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -427,7 +427,7 @@ namespace mongo {
auto_ptr< Cursor > _c;
DiskLoc startLoc( const DiskLoc &rec ) {
Extent *e = rec.rec()->myExtent( rec );
- if ( e->myLoc != _qp.nsd()->capExtent )
+ if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent ) )
return e->firstRecord;
// Likely we are on the fresh side of capExtent, so return first fresh record.
// If we are on the stale side of capExtent, then the collection is small and it
@@ -435,14 +435,22 @@ namespace mongo {
return _qp.nsd()->capFirstNewRecord;
}
+ // should never have an empty extent in the oplog, so don't worry about that case
DiskLoc prevLoc( const DiskLoc &rec ) {
Extent *e = rec.rec()->myExtent( rec );
- if ( e->xprev.isNull() )
- e = _qp.nsd()->lastExtent.ext();
- else
- e = e->xprev.ext();
- if ( e->myLoc != _qp.nsd()->capExtent )
- return e->firstRecord;
+ if ( _qp.nsd()->capLooped() ) {
+ if ( e->xprev.isNull() )
+ e = _qp.nsd()->lastExtent.ext();
+ else
+ e = e->xprev.ext();
+ if ( e->myLoc != _qp.nsd()->capExtent )
+ return e->firstRecord;
+ } else {
+ if ( !e->xprev.isNull() ) {
+ e = e->xprev.ext();
+ return e->firstRecord;
+ }
+ }
return DiskLoc(); // reached beginning of collection
}
void createClientCursor( const DiskLoc &startLoc = DiskLoc() ) {
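
[note] startLoc/prevLoc previously assumed the capped collection had already wrapped: they always special-cased capExtent and wrapped backwards through lastExtent. On a partially full, never-looped oplog that walk returned the wrong records; the fix only wraps when capLooped() is true. A simplified model of the backward extent walk (the capExtent/capFirstNewRecord special cases are omitted):

    #include <cstdio>

    struct Extent {
        Extent* xprev;        // previous extent, null at the first one
        int firstRecord;      // stand-in for the extent's first DiskLoc
    };

    // Returns -1 for "reached beginning of collection" (DiskLoc() in the real code).
    int prevFirstRecord(Extent* e, Extent* lastExtent, bool capLooped) {
        if (capLooped) {
            // wrapped collection: walking past the first extent circles
            // around to the last one
            e = e->xprev ? e->xprev : lastExtent;
            return e->firstRecord;
        }
        // not yet wrapped: the first extent really is the beginning
        return e->xprev ? e->xprev->firstRecord : -1;
    }

    int main() {
        Extent a{nullptr, 100}, b{&a, 200};
        std::printf("%d\n", prevFirstRecord(&b, &b, false));  // 100
        std::printf("%d\n", prevFirstRecord(&a, &b, false));  // -1: true beginning
        std::printf("%d\n", prevFirstRecord(&a, &b, true));   // 200: wraps to last
    }
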
diff --git a/db/update.cpp b/db/update.cpp
index 7049fff..6b9df9c 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -807,12 +807,8 @@ namespace mongo {
continue;
}
- if ( modsIsIndexed && multi ){
- c->noteLocation();
- }
-
const BSONObj& onDisk = loc.obj();
-
+
ModSet * useMods = mods.get();
bool forceRewrite = false;
@@ -826,6 +822,11 @@ namespace mongo {
auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
+ bool indexHack = multi && ( modsIsIndexed || ! mss->canApplyInPlace() );
+
+ if ( indexHack )
+ c->noteLocation();
+
if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ){
mss->applyModsInPlace();// const_cast<BSONObj&>(onDisk) );
@@ -868,7 +869,7 @@ namespace mongo {
numModded++;
if ( ! multi )
break;
- if ( multi && modsIsIndexed )
+ if ( indexHack )
c->checkLocation();
continue;
}
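
[note] The multi-update fix: noteLocation() used to be taken only when the mods touched an indexed field, but any update that cannot be applied in place rewrites the document and may relocate it, which also invalidates the cursor. The condition is therefore widened to multi && (modsIsIndexed || !canApplyInPlace()). The underlying save-and-reseek pattern, in toy form:

    #include <cstdio>

    // Toy cursor over record slots; noteLocation/checkLocation model saving a
    // stable position before a write that may move the current record.
    struct Cursor {
        int pos = 0;
        int noted = -1;
        void noteLocation()  { noted = pos; }   // remember before the risky write
        void checkLocation() { pos = noted; }   // re-seek after it
    };

    int main() {
        bool multi = true, modsIsIndexed = false, canApplyInPlace = false;
        // old condition: multi && modsIsIndexed -- skipped this case;
        // new condition also covers not-in-place rewrites
        bool indexHack = multi && (modsIsIndexed || !canApplyInPlace);

        Cursor c;
        if (indexHack) c.noteLocation();
        /* ... document rewritten here, possibly moving on disk ... */
        if (indexHack) c.checkLocation();
        std::printf("cursor position intact: %d\n", c.pos);
    }
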
diff --git a/db/update.h b/db/update.h
index 5d20114..3c4daab 100644
--- a/db/update.h
+++ b/db/update.h
@@ -100,12 +100,13 @@ namespace mongo {
}
}
- bool isIndexed( const set<string>& idxKeys ) const {
+ static bool isIndexed( const string& fullName , const set<string>& idxKeys ){
+ const char * fieldName = fullName.c_str();
// check if there is an index key that is a parent of mod
for( const char *dot = strchr( fieldName, '.' ); dot; dot = strchr( dot + 1, '.' ) )
if ( idxKeys.count( string( fieldName, dot - fieldName ) ) )
return true;
- string fullName = fieldName;
+
// check if there is an index key equal to mod
if ( idxKeys.count(fullName) )
return true;
@@ -113,6 +114,49 @@ namespace mongo {
set< string >::const_iterator j = idxKeys.upper_bound( fullName );
if ( j != idxKeys.end() && j->find( fullName ) == 0 && (*j)[fullName.size()] == '.' )
return true;
+
+ return false;
+ }
+
+ bool isIndexed( const set<string>& idxKeys ) const {
+ string fullName = fieldName;
+
+ if ( isIndexed( fullName , idxKeys ) )
+ return true;
+
+ if ( strstr( fieldName , "." ) ){
+ // check for a.0.1
+ StringBuilder buf( fullName.size() + 1 );
+ for ( size_t i=0; i<fullName.size(); i++ ){
+ char c = fullName[i];
+ buf << c;
+
+ if ( c != '.' )
+ continue;
+
+ if ( ! isdigit( fullName[i+1] ) )
+ continue;
+
+ bool possible = true;
+ size_t j=i+2;
+ for ( ; j<fullName.size(); j++ ){
+ char d = fullName[j];
+ if ( d == '.' )
+ break;
+ if ( isdigit( d ) )
+ continue;
+ possible = false;
+ break;
+ }
+
+ if ( possible )
+ i = j;
+ }
+ string x = buf.str();
+ if ( isIndexed( x , idxKeys ) )
+ return true;
+ }
+
return false;
}
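
[note] The new isIndexed overload also normalizes positional updates: a mod on "a.0.b" must count as indexed when there is an index on "a.b", so purely numeric path components are stripped and the rewritten name is tested as well. The stripping loop, extracted standalone (same algorithm as the diff above):

    #include <cctype>
    #include <iostream>
    #include <string>

    std::string stripNumericComponents(const std::string& fullName) {
        std::string out;
        for (size_t i = 0; i < fullName.size(); i++) {
            char c = fullName[i];
            out += c;
            if (c != '.' || i + 1 >= fullName.size() || !isdigit(fullName[i + 1]))
                continue;
            size_t j = i + 2;                  // scan the candidate digit run
            bool allDigits = true;
            for (; j < fullName.size() && fullName[j] != '.'; j++)
                if (!isdigit(fullName[j])) { allDigits = false; break; }
            if (allDigits)
                i = j;                         // skip the digits and their dot
        }
        return out;
    }

    int main() {
        std::cout << stripNumericComponents("a.0.b.c") << "\n";  // a.b.c
        std::cout << stripNumericComponents("a.10.b") << "\n";   // a.b
        std::cout << stripNumericComponents("a.x2.b") << "\n";   // a.x2.b (not numeric)
    }
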
diff --git a/dbtests/querytests.cpp b/dbtests/querytests.cpp
index f175543..24b71c4 100644
--- a/dbtests/querytests.cpp
+++ b/dbtests/querytests.cpp
@@ -978,7 +978,9 @@ namespace QueryTests {
for( int j = -1; j < i; ++j ) {
auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
ASSERT( c->more() );
- ASSERT_EQUALS( ( j > min ? j : min ), c->next()[ "ts" ].numberInt() );
+ BSONObj next = c->next();
+ ASSERT( !next[ "ts" ].eoo() );
+ ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
}
}
}
@@ -987,6 +989,40 @@ namespace QueryTests {
int _old;
};
+ class FindingStartPartiallyFull : public CollectionBase {
+ public:
+ FindingStartPartiallyFull() : CollectionBase( "findingstart" ), _old( __findingStartInitialTimeout ) {
+ __findingStartInitialTimeout = 0;
+ }
+ ~FindingStartPartiallyFull() {
+ __findingStartInitialTimeout = _old;
+ }
+
+ void run() {
+ BSONObj info;
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "size" << 10000 << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+ int i = 0;
+ for( ; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
+
+ for( int k = 0; k < 5; ++k ) {
+ client().insert( ns(), BSON( "ts" << i++ ) );
+ int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+ for( int j = -1; j < i; ++j ) {
+ auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ BSONObj next = c->next();
+ ASSERT( !next[ "ts" ].eoo() );
+ ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
+ }
+ }
+ }
+
+ private:
+ int _old;
+ };
+
+
class WhatsMyUri : public CollectionBase {
public:
WhatsMyUri() : CollectionBase( "whatsmyuri" ) {}
@@ -1069,6 +1105,7 @@ namespace QueryTests {
add< HelperTest >();
add< HelperByIdTest >();
add< FindingStart >();
+ add< FindingStartPartiallyFull >();
add< WhatsMyUri >();
add< parsedtests::basic1 >();
diff --git a/debian/changelog b/debian/changelog
index 8dff07d..f2054cb 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+mongodb (1.4.3) unstable; urgency=low
+
+ * bug fixes
+
+ -- Richard Kreuter <richard@10gen.com> Tue, 24 May 2010 16:56:28 -0500
+
mongodb (1.4.2) unstable; urgency=low
* bug fixes
diff --git a/debian/files b/debian/files
new file mode 100644
index 0000000..2e28959
--- /dev/null
+++ b/debian/files
@@ -0,0 +1 @@
+mongodb_0.9.7_amd64.deb devel optional
diff --git a/debian/mongodb.upstart b/debian/mongodb.upstart
new file mode 100644
index 0000000..ca6f9b7
--- /dev/null
+++ b/debian/mongodb.upstart
@@ -0,0 +1,15 @@
+# Ubuntu upstart file at /etc/init/mongodb.conf
+
+pre-start script
+ mkdir -p /var/lib/mongodb/
+ mkdir -p /var/log/mongodb/
+end script
+
+start on runlevel [2345]
+stop on runlevel [06]
+
+script
+ ENABLE_MONGODB="yes"
+ if [ -f /etc/default/mongodb ]; then . /etc/default/mongodb; fi
+ if [ "x$ENABLE_MONGODB" = "xyes" ]; then exec start-stop-daemon --start --quiet --chuid mongodb --exec /usr/bin/mongod -- --config /etc/mongodb.conf; fi
+end script
diff --git a/debian/preinst b/debian/preinst
new file mode 100644
index 0000000..c2d5362
--- /dev/null
+++ b/debian/preinst
@@ -0,0 +1,37 @@
+#!/bin/sh
+# preinst script for mongodb
+#
+# see: dh_installdeb(1)
+
+set -e
+
+# summary of how this script can be called:
+# * <new-preinst> `install'
+# * <new-preinst> `install' <old-version>
+# * <new-preinst> `upgrade' <old-version>
+# * <old-preinst> `abort-upgrade' <new-version>
+# for details, see http://www.debian.org/doc/debian-policy/ or
+# the debian-policy package
+
+
+case "$1" in
+ install|upgrade)
+ ;;
+
+ abort-upgrade)
+ ;;
+
+ *)
+ echo "preinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/doxygenConfig b/doxygenConfig
index 37ca7f2..7584a63 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 1.4.2
+PROJECT_NUMBER = 1.4.3
OUTPUT_DIRECTORY = docs
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/clone/clonecollection.js b/jstests/clone/clonecollection.js
index 123369f..b1f9c29 100644
--- a/jstests/clone/clonecollection.js
+++ b/jstests/clone/clonecollection.js
@@ -70,18 +70,18 @@ t = startMongod( "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "_to
for( i = 0; i < 1000; ++i ) {
f.a.save( { i: i } );
}
-assert.eq( 1000, f.a.find().count() );
+assert.eq( 1000, f.a.find().count() , "A1" );
assert.commandWorked( t.cloneCollection( "localhost:" + ports[ 0 ], "a" ) );
-assert.eq( 1000, t.a.find().count() );
+assert.eq( 1000, t.a.find().count() , "A2" );
t.a.drop();
assert.commandWorked( t.cloneCollection( "localhost:" + ports[ 0 ], "a", { i: { $gte: 10, $lt: 20 } } ) );
-assert.eq( 10, t.a.find().count() );
+assert.eq( 10, t.a.find().count() , "A3" );
t.a.drop();
-assert.eq( 0, t.system.indexes.find().count() );
+assert.eq( 0, t.system.indexes.find().count() , "prep 2");
f.a.ensureIndex( { i: 1 } );
assert.eq( 2, f.system.indexes.find().count(), "expected index missing" );
@@ -91,7 +91,7 @@ if ( t.system.indexes.find().count() != 2 ) {
}
assert.eq( 2, t.system.indexes.find().count(), "expected index missing" );
// Verify index works
-assert.eq( 50, t.a.find( { i: 50 } ).hint( { i: 1 } ).explain().indexBounds[0][0].i );
+assert.eq( 50, t.a.find( { i: 50 } ).hint( { i: 1 } ).explain().indexBounds[0][0].i , "verify 1" );
assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" );
// Check that capped-ness is preserved on clone
@@ -119,14 +119,15 @@ f.a.save( { i: 200000 } );
f.a.save( { i: -1 } );
f.a.remove( { i: 0 } );
f.a.update( { i: 99998 }, { i: 99998, x: "y" } );
+assert.eq( 100001, f.a.count() , "D0" );
ret = finishstartclone();
finishclone( ret );
-assert.eq( 100000, t.a.find().count() );
-assert.eq( 1, t.a.find( { i: 200000 } ).count() );
-assert.eq( 0, t.a.find( { i: -1 } ).count() );
-assert.eq( 0, t.a.find( { i: 0 } ).count() );
-assert.eq( 1, t.a.find( { i: 99998, x: "y" } ).count() );
+assert.eq( 100000, t.a.find().count() , "D1" );
+assert.eq( 1, t.a.find( { i: 200000 } ).count() , "D2" );
+assert.eq( 0, t.a.find( { i: -1 } ).count() , "D3" );
+assert.eq( 0, t.a.find( { i: 0 } ).count() , "D4" );
+assert.eq( 1, t.a.find( { i: 99998, x: "y" } ).count() , "D5" );
// Now test oplog running out of space -- specify small size clone oplog for test.
@@ -136,7 +137,7 @@ t.a.drop();
for( i = 0; i < 200000; ++i ) {
f.a.save( { i: i } );
}
-assert.eq( 200000, f.a.count() );
+assert.eq( 200000, f.a.count() , "E1" );
startstartclone( ", logSizeMb:1" );
ret = finishstartclone();
@@ -145,6 +146,8 @@ for( i = 200000; i < 250000; ++i ) {
f.a.save( { i: i } );
}
+assert.eq( 250000, f.a.count() , "F0" );
+
assert.commandFailed( dofinishclonecmd( ret ) );
// Make sure the same works with standard size op log.
@@ -154,7 +157,7 @@ t.a.drop();
for( i = 0; i < 200000; ++i ) {
f.a.save( { i: i } );
}
-assert.eq( 200000, f.a.count() );
+assert.eq( 200000, f.a.count() , "F1" );
startstartclone();
ret = finishstartclone();
@@ -162,10 +165,10 @@ ret = finishstartclone();
for( i = 200000; i < 250000; ++i ) {
f.a.save( { i: i } );
}
-assert.eq( 250000, f.a.count() );
+assert.eq( 250000, f.a.count() , "F2" );
finishclone( ret );
-assert.eq( 250000, t.a.find().count() );
+assert.eq( 250000, t.a.find().count() , "F3" );
// Test startCloneCollection and finishCloneCollection commands.
f.a.drop();
@@ -174,7 +177,7 @@ t.a.drop();
for( i = 0; i < 100000; ++i ) {
f.a.save( { i: i } );
}
-assert.eq( 100000, f.a.count() );
+assert.eq( 100000, f.a.count() , "G1" );
startstartclone();
@@ -182,9 +185,9 @@ sleep( 200 );
f.a.save( { i: -1 } );
ret = finishstartclone();
-assert.eq( 100001, t.a.find().count() );
+assert.eq( 100001, t.a.find().count() , "G2" );
f.a.save( { i: -2 } );
-assert.eq( 100002, f.a.find().count() );
+assert.eq( 100002, f.a.find().count() , "G3" );
finishclone( ret );
-assert.eq( 100002, t.a.find().count() );
+assert.eq( 100002, t.a.find().count() , "G4" );
diff --git a/jstests/index_arr1.js b/jstests/index_arr1.js
new file mode 100644
index 0000000..d35cb80
--- /dev/null
+++ b/jstests/index_arr1.js
@@ -0,0 +1,23 @@
+
+t = db.index_arr1
+t.drop()
+
+t.insert( { _id : 1 , a : 5 , b : [ { x : 1 } ] } )
+t.insert( { _id : 2 , a : 5 , b : [] } )
+t.insert( { _id : 3 , a : 5 } )
+
+assert.eq( 3 , t.find( { a : 5 } ).itcount() , "A1" )
+
+t.ensureIndex( { a : 1 , "b.x" : 1 } )
+
+//t.find().sort( { a : 1 } )._addSpecial( "$returnKey" , 1 ).forEach( printjson )
+//t.find( { a : 5 } ).forEach( printjson )
+
+assert.eq( 3 , t.find( { a : 5 } ).itcount() , "A2" ); // SERVER-1082
+
+
+assert.eq( 2 , t.getIndexes().length , "B1" )
+t.insert( { _id : 4 , a : 5 , b : [] } )
+t.ensureIndex( { a : 1 , "b.a" : 1 , "b.c" : 1 } )
+assert.eq( 3 , t.getIndexes().length , "B2" )
+
diff --git a/jstests/index_arr2.js b/jstests/index_arr2.js
new file mode 100644
index 0000000..21a511b
--- /dev/null
+++ b/jstests/index_arr2.js
@@ -0,0 +1,51 @@
+NUM = 20;
+M = 5;
+
+t = db.xxx;
+
+function test( withIndex ){
+ t.drop();
+
+ // insert a bunch of items to force queries to use the index.
+ newObject = {
+ _id : 1,
+ a : [
+ { b : { c : 1 } }
+ ]
+ }
+
+ now = (new Date()).getTime() / 1000;
+ for (created = now - NUM; created <= now; created++ ) {
+ newObject['created'] = created;
+ t.insert(newObject);
+ newObject['_id'] ++;
+ }
+
+ // change the last M items.
+ query = {
+ 'created' : { '$gte' : now - M }
+ }
+
+ Z = t.find( query ).count();
+
+ if ( withIndex ){
+ //t.ensureIndex( { 'a.b.c' : 1, 'created' : -1 } )
+ //t.ensureIndex( { created : -1 } )
+ t.ensureIndex( { 'a.b.c' : 1 } , { name : "x" } )
+ }
+
+ t.update(query, { '$set' : { "a.0.b.c" : 0 } } , false , true )
+ assert.eq( Z , db.getLastErrorObj().n , "num updated withIndex:" + withIndex );
+
+ // now see how many were actually updated.
+ query['a.b.c'] = 0;
+
+ count = t.count(query);
+
+ assert.eq( Z , count , "count after withIndex:" + withIndex );
+}
+
+test( false )
+test( true );
+
+
diff --git a/jstests/update_multi3.js b/jstests/update_multi3.js
new file mode 100644
index 0000000..903d826
--- /dev/null
+++ b/jstests/update_multi3.js
@@ -0,0 +1,25 @@
+
+t = db.update_multi3;
+
+function test( useIndex ){
+ t.drop();
+
+ if ( useIndex )
+ t.ensureIndex({k:1})
+
+ for (i=0; i<10; i++) {
+ t.save({ _id : i , k: 'x', a: []});
+ }
+
+ t.update({k: 'x'}, {$push: {a: 'y'}}, false, true);
+
+ t.find( { k : "x" } ).forEach(
+ function(z){
+ assert.eq( [ "y" ] , z.a , "useIndex: " + useIndex )
+ }
+ );
+
+}
+
+test( false )
+test( true )
diff --git a/lib/libboost_thread-gcc41-mt-d-1_34_1.a b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
new file mode 100644
index 0000000..09377ac
--- /dev/null
+++ b/lib/libboost_thread-gcc41-mt-d-1_34_1.a
Binary files differ
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 526cf4b..8ad6a0f 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 1.4.2
+Version: 1.4.3
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/stdafx.cpp b/stdafx.cpp
index 09f202a..0e4bf45 100644
--- a/stdafx.cpp
+++ b/stdafx.cpp
@@ -32,6 +32,6 @@
namespace mongo {
- const char versionString[] = "1.4.2";
+ const char versionString[] = "1.4.3";
} // namespace mongo
diff --git a/util/mmap.h b/util/mmap.h
index 947364b..c3133e4 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -61,6 +61,7 @@ namespace mongo {
HANDLE maphandle;
void *view;
long len;
+ string _filename;
};
void printMemInfo( const char * where );
diff --git a/util/mmap_posix.cpp b/util/mmap_posix.cpp
index 836373d..a5caf8c 100644
--- a/util/mmap_posix.cpp
+++ b/util/mmap_posix.cpp
@@ -51,6 +51,7 @@ namespace mongo {
void* MemoryMappedFile::map(const char *filename, long &length, int options) {
// length may be updated by callee.
+ _filename = filename;
theFileAllocator().allocateAsap( filename, length );
len = length;
diff --git a/util/mmap_win.cpp b/util/mmap_win.cpp
index d831d66..6168d9d 100644
--- a/util/mmap_win.cpp
+++ b/util/mmap_win.cpp
@@ -46,13 +46,14 @@ namespace mongo {
buf << s;
return buf.str();
}
-
+
unsigned mapped = 0;
- void* MemoryMappedFile::map(const char *_filename, long &length, int options) {
+ void* MemoryMappedFile::map(const char *filenameIn, long &length, int options) {
+ _filename = filenameIn;
/* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
char filename[256];
- strncpy(filename, _filename, 255);
+ strncpy(filename, filenameIn, 255);
filename[255] = 0;
{
size_t len = strlen( filename );
@@ -107,13 +108,13 @@ namespace mongo {
bool success = FlushViewOfFile(view, 0); // 0 means whole mapping
if (!success){
int err = GetLastError();
- out() << "FlushViewOfFile failed " << err << endl;
+ out() << "FlushViewOfFile failed " << err << " file: " << _filename << endl;
}
success = FlushFileBuffers(fd);
if (!success){
int err = GetLastError();
- out() << "FlushFileBuffers failed " << err << endl;
+ out() << "FlushFileBuffers failed " << err << " file: " << _filename << endl;
}
}
}
diff --git a/util/ntservice.cpp b/util/ntservice.cpp
index 602d98a..07dce20 100644
--- a/util/ntservice.cpp
+++ b/util/ntservice.cpp
@@ -17,6 +17,7 @@
#include "stdafx.h"
#include "ntservice.h"
+#include <direct.h>
#if defined(_WIN32)
@@ -32,17 +33,26 @@ namespace mongo {
}
bool ServiceController::installService( const std::wstring& serviceName, const std::wstring& displayName, const std::wstring& serviceDesc, int argc, char* argv[] ) {
+ assert(argc >= 1);
+
+ stringstream commandLine;
+
+ if ( strchr(argv[0], ':') ) { // a crude test for fully qualified path
+ commandLine << '"' << argv[0] << "\" ";
+ } else {
+ char buffer[256];
+ assert( _getcwd(buffer, 256) );
+ commandLine << '"' << buffer << '\\' << argv[0] << "\" ";
+ }
- std::string commandLine;
-
- for ( int i = 0; i < argc; i++ ) {
+ for ( int i = 1; i < argc; i++ ) {
std::string arg( argv[ i ] );
// replace install command to indicate process is being started as a service
if ( arg == "--install" )
arg = "--service";
- commandLine += arg + " ";
+ commandLine << arg << " ";
}
SC_HANDLE schSCManager = ::OpenSCManager( null, null, SC_MANAGER_ALL_ACCESS );
@@ -50,7 +60,7 @@ namespace mongo {
return false;
std::basic_ostringstream< TCHAR > commandLineWide;
- commandLineWide << commandLine.c_str();
+ commandLineWide << commandLine.str().c_str();
// create new service
SC_HANDLE schService = ::CreateService( schSCManager, serviceName.c_str(), displayName.c_str(),