Diffstat (limited to 'db')
-rw-r--r--  db/db.cpp            2
-rw-r--r--  db/dbcommands.cpp    5
-rw-r--r--  db/index.cpp        46
-rw-r--r--  db/pdfile.cpp        5
-rw-r--r--  db/repl.cpp          2
-rw-r--r--  db/repl.h           22
-rw-r--r--  db/update.cpp       13
-rw-r--r--  db/update.h         48
8 files changed, 109 insertions, 34 deletions
diff --git a/db/db.cpp b/db/db.cpp
index 9ff49ba..51369bb 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -429,6 +429,8 @@ namespace mongo {
class DataFileSync : public BackgroundJob {
public:
void run(){
+ if ( _sleepsecs > 2100 )
+ _sleepsecs = 2100;
log(1) << "will flush memory every: " << _sleepsecs << " seconds" << endl;
int time_flushing = 0;
while ( ! inShutdown() ){
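
The guard above caps the background flush interval at 2100 seconds. A minimal standalone sketch of the same clamp, with an illustrative function name (the real code modifies the DataFileSync member _sleepsecs directly):

    #include <iostream>

    // Illustrative clamp mirroring the guard added to DataFileSync::run():
    // any configured flush interval above 2100 seconds is pulled back to 2100.
    static double clampFlushInterval( double sleepSecs ) {
        const double kMaxFlushSecs = 2100;   // upper bound taken from the hunk above
        return sleepSecs > kMaxFlushSecs ? kMaxFlushSecs : sleepSecs;
    }

    int main() {
        std::cout << clampFlushInterval( 5000 ) << std::endl;   // 2100
        std::cout << clampFlushInterval( 60 ) << std::endl;     // 60
    }
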
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 46c2e4d..85c695d 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -317,9 +317,10 @@ namespace mongo {
bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
+ result.append("version", versionString);
result.append("uptime",(double) (time(0)-started));
result.appendDate( "localTime" , jsTime() );
-
+
{
BSONObjBuilder t;
@@ -339,6 +340,8 @@ namespace mongo {
BSONObjBuilder t( result.subobjStart( "mem" ) );
+ t.append("bits", ( sizeof(int*) == 4 ? 32 : 64 ) );
+
ProcessInfo p;
if ( p.supported() ){
t.appendNumber( "resident" , p.getResidentSize() );
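
The new "bits" field reports whether this is a 32-bit or 64-bit build by looking at the pointer size. A standalone sketch of the same expression the patch appends to the "mem" subobject:

    #include <iostream>

    int main() {
        // 4-byte pointers mean a 32-bit build; anything else is reported as 64.
        int bits = ( sizeof(int*) == 4 ? 32 : 64 );
        std::cout << "bits: " << bits << std::endl;
    }
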
diff --git a/db/index.cpp b/db/index.cpp
index 5ec2658..6931d93 100644
--- a/db/index.cpp
+++ b/db/index.cpp
@@ -206,6 +206,8 @@ namespace mongo {
break;
}
}
+
+ bool insertArrayNull = false;
if ( allFound ) {
if ( arrElt.eoo() ) {
@@ -231,29 +233,45 @@ namespace mongo {
}
}
else if ( fixed.size() > 1 ){
- // x : [] - need to insert undefined
- BSONObjBuilder b(_sizeTracker);
- for( unsigned j = 0; j < fixed.size(); ++j ) {
- if ( j == arrIdx )
- b.appendUndefined( "" );
- else
- b.appendAs( fixed[ j ], "" );
- }
- keys.insert( b.obj() );
+ insertArrayNull = true;
}
}
} else {
// nonterminal array element to expand, so recurse
assert( !arrElt.eoo() );
BSONObjIterator i( arrElt.embeddedObject() );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() == Object )
- _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
+ if ( i.more() ){
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() == Object )
+ _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
+ }
+ }
+ else {
+ insertArrayNull = true;
}
}
- }
+
+ if ( insertArrayNull ){
+ // x : [] - need to insert undefined
+ BSONObjBuilder b(_sizeTracker);
+ for( unsigned j = 0; j < fixed.size(); ++j ) {
+ if ( j == arrIdx ){
+ b.appendUndefined( "" );
+ }
+ else {
+ BSONElement e = fixed[j];
+ if ( e.eoo() )
+ b.appendNull( "" );
+ else
+ b.appendAs( e , "" );
+ }
+ }
+ keys.insert( b.obj() );
+ }
+ }
+
/* Pull out the relevant key objects from obj, so we
can index them. Note that the set is multiple elements
only when it's a "multikey" array.
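
The insertArrayNull branch makes an empty array still contribute one index key. As a concrete case, assuming an { a : 1, b : 1 } index and the builder calls shown in the hunk (so this fragment only reads in that context), a document { a : [], b : 3 } now yields a single key with undefined in the array slot and the other values copied through; a missing non-array field would become null:

    // Hypothetical trace of the new branch for { a : [], b : 3 } with arrIdx == 0.
    BSONObjBuilder b( _sizeTracker );
    b.appendUndefined( "" );        // a : []  -> undefined key part
    b.appendAs( fixed[1], "" );     // b : 3   -> copied through
    // if b were missing, fixed[1].eoo() would hold and appendNull( "" ) runs instead
    keys.insert( b.obj() );         // one key: { "" : undefined, "" : 3 }
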
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index e46ffb7..80ae649 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -904,10 +904,9 @@ namespace mongo {
idx.head,
dl, *changes[x].added[i], idxKey, /*dupsAllowed*/true, idx);
}
- catch (AssertionException&) {
+ catch (AssertionException& e) {
ss << " exception update index ";
- out() << " caught assertion update index " << idx.indexNamespace() << '\n';
- problem() << " caught assertion update index " << idx.indexNamespace() << endl;
+ problem() << " caught assertion update index " << idx.indexNamespace() << " " << e << endl;
}
}
}
diff --git a/db/repl.cpp b/db/repl.cpp
index 137c25f..a0ac16e 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -1053,7 +1053,7 @@ namespace mongo {
BSONObj last = conn->findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
if ( !last.isEmpty() ) {
BSONElement ts = last.getField( "ts" );
- massert( 10386 , "non Date ts found", ts.type() == Date || ts.type() == Timestamp );
+ massert( 10386 , (string)"non Date ts found:" + last.jsonString() , ts.type() == Date || ts.type() == Timestamp );
syncedTo = OpTime( ts.date() );
}
}
diff --git a/db/repl.h b/db/repl.h
index a42fa8e..eb1cb26 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -427,7 +427,7 @@ namespace mongo {
auto_ptr< Cursor > _c;
DiskLoc startLoc( const DiskLoc &rec ) {
Extent *e = rec.rec()->myExtent( rec );
- if ( e->myLoc != _qp.nsd()->capExtent )
+ if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent ) )
return e->firstRecord;
// Likely we are on the fresh side of capExtent, so return first fresh record.
// If we are on the stale side of capExtent, then the collection is small and it
@@ -435,14 +435,22 @@ namespace mongo {
return _qp.nsd()->capFirstNewRecord;
}
+ // should never have an empty extent in the oplog, so don't worry about that case
DiskLoc prevLoc( const DiskLoc &rec ) {
Extent *e = rec.rec()->myExtent( rec );
- if ( e->xprev.isNull() )
- e = _qp.nsd()->lastExtent.ext();
- else
- e = e->xprev.ext();
- if ( e->myLoc != _qp.nsd()->capExtent )
- return e->firstRecord;
+ if ( _qp.nsd()->capLooped() ) {
+ if ( e->xprev.isNull() )
+ e = _qp.nsd()->lastExtent.ext();
+ else
+ e = e->xprev.ext();
+ if ( e->myLoc != _qp.nsd()->capExtent )
+ return e->firstRecord;
+ } else {
+ if ( !e->xprev.isNull() ) {
+ e = e->xprev.ext();
+ return e->firstRecord;
+ }
+ }
return DiskLoc(); // reached beginning of collection
}
void createClientCursor( const DiskLoc &startLoc = DiskLoc() ) {
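
prevLoc() now distinguishes a capped collection that has wrapped (capLooped()) from one that has not. In the non-looped case the cursor simply walks back one extent at a time; a standalone sketch of that backward walk using mock types (not MongoDB's Extent/DiskLoc):

    #include <cstdio>

    // Mock extent: xprev stands in for the DiskLoc of the previous extent,
    // firstRecord for the DiskLoc the real code returns.
    struct MockExtent {
        MockExtent *xprev;
        int firstRecord;
    };

    // Non-looped branch from the hunk above: return the previous extent's first
    // record, or a "null" location once the beginning of the collection is reached.
    int prevInNonLooped( const MockExtent *e ) {
        return e->xprev ? e->xprev->firstRecord : -1;
    }

    int main() {
        MockExtent first = { 0, 100 };
        MockExtent second = { &first, 200 };
        printf( "%d %d\n", prevInNonLooped( &second ), prevInNonLooped( &first ) );  // 100 -1
    }
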
diff --git a/db/update.cpp b/db/update.cpp
index 7049fff..6b9df9c 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -807,12 +807,8 @@ namespace mongo {
continue;
}
- if ( modsIsIndexed && multi ){
- c->noteLocation();
- }
-
const BSONObj& onDisk = loc.obj();
-
+
ModSet * useMods = mods.get();
bool forceRewrite = false;
@@ -826,6 +822,11 @@ namespace mongo {
auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
+ bool indexHack = multi && ( modsIsIndexed || ! mss->canApplyInPlace() );
+
+ if ( indexHack )
+ c->noteLocation();
+
if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ){
mss->applyModsInPlace();// const_cast<BSONObj&>(onDisk) );
@@ -868,7 +869,7 @@ namespace mongo {
numModded++;
if ( ! multi )
break;
- if ( multi && modsIsIndexed )
+ if ( indexHack )
c->checkLocation();
continue;
}
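
The indexHack flag widens the old multi && modsIsIndexed test: the cursor position must also be protected when the mods cannot be applied in place, since the rewritten document may be relocated. The pattern, annotated (this fragment only reads in the context of the update loop above):

    bool indexHack = multi && ( modsIsIndexed || ! mss->canApplyInPlace() );
    if ( indexHack )
        c->noteLocation();      // remember the cursor position before the doc can move
    // ... mods are applied; the document may be re-keyed or relocated ...
    if ( indexHack )
        c->checkLocation();     // re-validate the position before advancing to the next doc
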
diff --git a/db/update.h b/db/update.h
index 5d20114..3c4daab 100644
--- a/db/update.h
+++ b/db/update.h
@@ -100,12 +100,13 @@ namespace mongo {
}
}
- bool isIndexed( const set<string>& idxKeys ) const {
+ static bool isIndexed( const string& fullName , const set<string>& idxKeys ){
+ const char * fieldName = fullName.c_str();
// check if there is an index key that is a parent of mod
for( const char *dot = strchr( fieldName, '.' ); dot; dot = strchr( dot + 1, '.' ) )
if ( idxKeys.count( string( fieldName, dot - fieldName ) ) )
return true;
- string fullName = fieldName;
+
// check if there is an index key equal to mod
if ( idxKeys.count(fullName) )
return true;
@@ -113,6 +114,49 @@ namespace mongo {
set< string >::const_iterator j = idxKeys.upper_bound( fullName );
if ( j != idxKeys.end() && j->find( fullName ) == 0 && (*j)[fullName.size()] == '.' )
return true;
+
+ return false;
+ }
+
+ bool isIndexed( const set<string>& idxKeys ) const {
+ string fullName = fieldName;
+
+ if ( isIndexed( fullName , idxKeys ) )
+ return true;
+
+ if ( strstr( fieldName , "." ) ){
+ // check for a.0.1
+ StringBuilder buf( fullName.size() + 1 );
+ for ( size_t i=0; i<fullName.size(); i++ ){
+ char c = fullName[i];
+ buf << c;
+
+ if ( c != '.' )
+ continue;
+
+ if ( ! isdigit( fullName[i+1] ) )
+ continue;
+
+ bool possible = true;
+ size_t j=i+2;
+ for ( ; j<fullName.size(); j++ ){
+ char d = fullName[j];
+ if ( d == '.' )
+ break;
+ if ( isdigit( d ) )
+ continue;
+ possible = false;
+ break;
+ }
+
+ if ( possible )
+ i = j;
+ }
+ string x = buf.str();
+ if ( isIndexed( x , idxKeys ) )
+ return true;
+ }
+
return false;
}
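
The new string-taking isIndexed() overload lets the field-level check run twice: once on the literal field name and once on a copy with positional (numeric) path components removed, so a mod on "a.0.b" is still seen as touching an index on "a.b". A standalone sketch of that normalization (illustrative function name; the real code builds the string with StringBuilder inside the member overload):

    #include <cctype>
    #include <iostream>
    #include <string>

    // Drop numeric path components: "a.0.b" -> "a.b", "a.0.1" -> "a.1"
    // (the leftover ".1" is then caught by the parent-key check on "a").
    static std::string stripPositional( const std::string &fullName ) {
        std::string out;
        for ( size_t i = 0; i < fullName.size(); i++ ) {
            char c = fullName[i];
            out += c;
            if ( c != '.' || i + 1 >= fullName.size() || ! isdigit( (unsigned char)fullName[i+1] ) )
                continue;
            size_t j = i + 1;
            while ( j < fullName.size() && isdigit( (unsigned char)fullName[j] ) )
                j++;
            if ( j == fullName.size() || fullName[j] == '.' )
                i = j;   // jump past the digits (the loop's i++ then skips the dot after them)
        }
        return out;
    }

    int main() {
        std::cout << stripPositional( "a.0.b" ) << std::endl;   // a.b
        std::cout << stripPositional( "a.0.1" ) << std::endl;   // a.1
    }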