Diffstat (limited to 'db')
 db/btree.cpp         |  1
 db/btreecursor.cpp   |  1
 db/cloner.cpp        |  2
 db/db.cpp            | 34
 db/dbcommands.cpp    | 15
 db/dbhelpers.cpp     |  2
 db/dur_commitjob.cpp |  5
 db/geo/2d.cpp        |  9
 db/index.h           |  7
 db/namespace.cpp     | 13
 db/namespace.h       |  1
 db/pdfile.cpp        |  1
 db/update.cpp        |  3
 13 files changed, 86 insertions, 8 deletions
diff --git a/db/btree.cpp b/db/btree.cpp
index 242c534..299c212 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -1642,6 +1642,7 @@ namespace mongo {
}
DiskLoc BtreeBucket::findSingle( const IndexDetails& indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const {
+ indexdetails.checkVersion();
int pos;
bool found;
// TODO: is it really ok here that the order is a default?
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index 9cab95f..ce841ce 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -73,6 +73,7 @@ namespace mongo {
}
void BtreeCursor::audit() {
+ indexDetails.checkVersion();
dassert( d->idxNo((IndexDetails&) indexDetails) == idxNo );
if ( otherTraceLevel >= 12 ) {
diff --git a/db/cloner.cpp b/db/cloner.cpp
index fe57463..ec5ba99 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -624,6 +624,8 @@ namespace mongo {
nsToDatabase( target.c_str(), to );
if ( strcmp( from, to ) == 0 ) {
renameNamespace( source.c_str(), target.c_str() );
+ // make sure we drop counters etc
+ Top::global.collectionDropped( source );
return true;
}
}
diff --git a/db/db.cpp b/db/db.cpp
index 579b4a1..4f4575c 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -46,6 +46,7 @@
# include "../util/ntservice.h"
#else
# include <sys/file.h>
+# include <sys/resource.h>
#endif
namespace mongo {
@@ -108,7 +109,36 @@ namespace mongo {
}
try {
+#ifndef __linux__ // TODO: consider making this ifdef _WIN32
boost::thread thr(boost::bind(&connThread,mp));
+#else
+ pthread_attr_t attrs;
+ pthread_attr_init(&attrs);
+ pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
+
+ static const size_t STACK_SIZE = 4*1024*1024;
+
+ struct rlimit limits;
+ assert(getrlimit(RLIMIT_STACK, &limits) == 0);
+ if (limits.rlim_cur > STACK_SIZE) {
+ pthread_attr_setstacksize(&attrs, (DEBUG_BUILD
+ ? (STACK_SIZE / 2)
+ : STACK_SIZE));
+ }
+ else if (limits.rlim_cur < 1024*1024) {
+ warning() << "Stack size set to " << (limits.rlim_cur/1024) << "KB. We suggest at least 1MB" << endl;
+ }
+
+ pthread_t thread;
+ int failed = pthread_create(&thread, &attrs, (void*(*)(void*)) &connThread, mp);
+
+ pthread_attr_destroy(&attrs);
+
+ if (failed) {
+ log() << "pthread_create failed: " << errnoWithDescription(failed) << endl;
+ throw boost::thread_resource_error(); // for consistency with boost::thread
+ }
+#endif
}
catch ( boost::thread_resource_error& ) {
log() << "can't create new thread, closing connection" << endl;
@@ -699,6 +729,7 @@ int main(int argc, char* argv[]) {
("pairwith", po::value<string>(), "address of server to pair with DEPRECATED")
("arbiter", po::value<string>(), "address of replica pair arbiter server DEPRECATED")
("nodur", "disable journaling (currently the default)")
+ ("nojournal", "disable journaling (currently the default)")
("appsrvpath", po::value<string>(), "root directory for the babble app server")
("nocursors", "diagnostic/debugging option that turns off cursors DO NOT USE IN PRODUCTION")
("nohints", "ignore query hints")
@@ -800,6 +831,9 @@ int main(int argc, char* argv[]) {
if( params.count("nodur") ) {
cmdLine.dur = false;
}
+ if( params.count("nojournal") ) {
+ cmdLine.dur = false;
+ }
if( params.count("dur") || params.count( "journal" ) ) {
cmdLine.dur = true;
}
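For reference, the --nojournal switch added above is a plain alias for --nodur: both options are registered and both are checked. A small self-contained sketch of that boost::program_options pattern, where the flag names mirror the hunk and the local dur variable stands in for cmdLine.dur:

#include <boost/program_options.hpp>
#include <iostream>

namespace po = boost::program_options;

int main(int argc, char* argv[]) {
    po::options_description opts("options");
    opts.add_options()
        ("nodur", "disable journaling (currently the default)")
        ("nojournal", "disable journaling (currently the default)")
        ("dur", "enable journaling")
        ("journal", "enable journaling");

    po::variables_map params;
    po::store(po::parse_command_line(argc, argv, opts), params);
    po::notify(params);

    bool dur = false; // stand-in for cmdLine.dur
    if (params.count("nodur") || params.count("nojournal"))
        dur = false;
    if (params.count("dur") || params.count("journal"))
        dur = true; // the "on" switches win if both are given, as in the hunk

    std::cout << "journaling " << (dur ? "on" : "off") << std::endl;
    return 0;
}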
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index cf0857a..59dd78c 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -851,6 +851,17 @@ namespace mongo {
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
+ if ( o.getIntField("v") > 0 ) {
+ BSONObjBuilder b;
+ BSONObjIterator i( o );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( str::equals( e.fieldName() , "v" ) )
+ continue;
+ b.append( e );
+ }
+ o = b.obj();
+ }
theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
}
@@ -1753,6 +1764,7 @@ namespace mongo {
}
if ( cmdObj["help"].trueValue() ) {
+ client.curop()->ensureStarted();
stringstream ss;
ss << "help for: " << c->name << " ";
c->help( ss );
@@ -1777,6 +1789,7 @@ namespace mongo {
if ( c->locktype() == Command::NONE ) {
// we also trust that this won't crash
+ client.curop()->ensureStarted();
string errmsg;
int ok = c->run( dbname , cmdObj , errmsg , result , fromRepl );
if ( ! ok )
@@ -1791,6 +1804,7 @@ namespace mongo {
}
mongolock lk( needWriteLock );
+ client.curop()->ensureStarted();
Client::Context ctx( dbname , dbpath , &lk , c->requiresAuth() );
try {
@@ -1824,7 +1838,6 @@ namespace mongo {
returns true if ran a cmd
*/
bool _runCommands(const char *ns, BSONObj& _cmdobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
- cc().curop()->ensureStarted();
string dbname = nsToDatabase( ns );
if( logLevel >= 1 )
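The ensureStarted() call removed from _runCommands above is re-added in the individual branches, so the operation clock starts only once real work begins. CurOp's real interface is richer; the OpTimer below is a hypothetical stand-in that only illustrates the idempotent, lazily-started semantics.

#include <ctime>
#include <cstdio>

// Hypothetical stand-in for lazily-started operation timing: the clock starts
// on the first call only, so moving the call past cheap early-outs (help text,
// etc.) keeps those paths out of the measured time.
class OpTimer {
    bool _started;
    clock_t _start;
public:
    OpTimer() : _started(false), _start(0) {}
    void ensureStarted() {
        if (!_started) { // idempotent: later calls are no-ops
            _started = true;
            _start = clock();
        }
    }
    double elapsedSecs() const {
        return _started ? double(clock() - _start) / CLOCKS_PER_SEC : 0.0;
    }
};

int main() {
    OpTimer op;
    op.ensureStarted();
    op.ensureStarted(); // second call does not reset the start time
    std::printf("%.6f\n", op.elapsedSecs());
    return 0;
}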
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index 3079aad..5e49589 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -269,7 +269,7 @@ namespace mongo {
getDur().commitIfNeeded();
- if ( yield && ! cc->yieldSometimes() ) {
+ if ( yield && ! cc->yield() ) {
// cursor got finished by someone else, so we're done
cc.release(); // if the collection/db is dropped, cc may be deleted
break;
diff --git a/db/dur_commitjob.cpp b/db/dur_commitjob.cpp
index c67f37c..af77c4f 100644
--- a/db/dur_commitjob.cpp
+++ b/db/dur_commitjob.cpp
@@ -206,9 +206,10 @@ namespace mongo {
// throttle logging
if( ++nComplains < 100 || time(0) - lastComplain >= 60 ) {
lastComplain = time(0);
- log() << "replSet warning DR102 too much data written uncommitted " << _bytes/1000000.0 << "MB" << endl;
+ warning() << "DR102 too much data written uncommitted " << _bytes/1000000.0 << "MB" << endl;
if( nComplains < 10 || nComplains % 10 == 0 ) {
- wassert(!"replSet warning DR102 too much data written uncommitted");
+ // wassert makes getLastError show an error, so we just print stack trace
+ printStackTrace();
}
}
}
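The logging change above keeps the DR102 warning but throttles it: the first 100 occurrences always print, after that at most one line per minute. A compact sketch of that rule, with function-local statics standing in for the member counters and no locking:

#include <ctime>
#include <iostream>

// Report an over-threshold condition while capping log volume: the first 100
// occurrences always print, later ones at most once per 60 seconds.
// Statics stand in for the member counters in the real code; not thread-safe.
static void complainUncommitted(double megabytes) {
    static unsigned nComplains = 0;
    static time_t lastComplain = 0;

    if (++nComplains < 100 || time(0) - lastComplain >= 60) {
        lastComplain = time(0);
        std::cerr << "warning: DR102 too much data written uncommitted "
                  << megabytes << "MB" << std::endl;
    }
}

int main() {
    for (int i = 0; i < 1000; i++)
        complainUncommitted(128.0);
    return 0;
}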
diff --git a/db/geo/2d.cpp b/db/geo/2d.cpp
index d6c97f6..7b2bf17 100644
--- a/db/geo/2d.cpp
+++ b/db/geo/2d.cpp
@@ -1125,7 +1125,14 @@ namespace mongo {
virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
virtual BSONObj current() { assert(ok()); return _cur->_o; }
virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
- virtual bool advance() { _cur++; incNscanned(); return ok(); }
+ virtual bool advance() {
+ if( ok() ){
+ _cur++;
+ incNscanned();
+ return ok();
+ }
+ return false;
+ }
virtual BSONObj currKey() const { return _cur->_key; }
virtual string toString() {
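The advance() fix above makes calling advance() on an exhausted cursor a harmless no-op instead of incrementing _cur past the end. A minimal cursor over a std::vector showing the same guard (VectorCursor is purely illustrative):

#include <vector>
#include <cassert>

// Minimal cursor illustrating the guard added above: advance() returns false
// and does nothing once the cursor is exhausted, rather than walking past the
// end of the container.
template <typename T>
class VectorCursor {
    typename std::vector<T>::const_iterator _cur, _end;
public:
    explicit VectorCursor(const std::vector<T>& v) : _cur(v.begin()), _end(v.end()) {}
    bool ok() const { return _cur != _end; }
    const T& current() const { assert(ok()); return *_cur; }
    bool advance() {
        if (ok()) {
            ++_cur;
            return ok();
        }
        return false; // already exhausted: do not increment again
    }
};

int main() {
    std::vector<int> v;
    v.push_back(1);
    v.push_back(2);
    VectorCursor<int> c(v);
    while (c.ok())
        c.advance();
    assert(!c.advance()); // safe to call again; cursor stays exhausted
    return 0;
}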
diff --git a/db/index.h b/db/index.h
index 8578ed3..d13bd1d 100644
--- a/db/index.h
+++ b/db/index.h
@@ -145,6 +145,13 @@ namespace mongo {
const IndexSpec& getSpec() const;
+ void checkVersion() const {
+ // TODO: cache?
+ massert( 13658 ,
+ str::stream() << "using a newer index version: " << info.obj() << " v: " << info.obj().getIntField("v" ) ,
+ info.obj().getIntField("v") <= 0 );
+ }
+
string toString() const {
return info.obj().toString();
}
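checkVersion() above refuses to use index metadata whose on-disk "v" field is newer than this build understands (the massert condition accepts only v <= 0). A stripped-down sketch of the same guard without the BSON and massert machinery; MAX_SUPPORTED_INDEX_VERSION and checkIndexVersion are illustrative names only.

#include <sstream>
#include <stdexcept>
#include <iostream>

// Refuse to operate on metadata written in a newer, unknown on-disk format.
static const int MAX_SUPPORTED_INDEX_VERSION = 0; // the hunk above accepts only v <= 0

static void checkIndexVersion(int v) {
    if (v > MAX_SUPPORTED_INDEX_VERSION) {
        std::ostringstream ss;
        ss << "using a newer index version: v: " << v;
        // massert() raises a MsgAssertionException in the real code
        throw std::runtime_error(ss.str());
    }
}

int main() {
    checkIndexVersion(0);     // supported version: fine
    try {
        checkIndexVersion(1); // newer than this build understands: rejected
    } catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
    }
    return 0;
}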
diff --git a/db/namespace.cpp b/db/namespace.cpp
index fcdaee2..0cb0e74 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -598,6 +598,17 @@ namespace mongo {
}
}
+ void NamespaceDetailsTransient::eraseForPrefix(const char *prefix) {
+ assertInWriteLock();
+ vector< string > found;
+ for( ouriter i = _map.begin(); i != _map.end(); ++i )
+ if ( strncmp( i->first.c_str(), prefix, strlen( prefix ) ) == 0 )
+ found.push_back( i->first );
+ for( vector< string >::iterator i = found.begin(); i != found.end(); ++i ) {
+ _map.erase(*i);
+ }
+ }
+
void NamespaceDetailsTransient::computeIndexKeys() {
_keysComputed = true;
_indexKeys.clear();
@@ -648,7 +659,7 @@ namespace mongo {
// index details across commands are in cursors and nsd
// transient (including query cache) so clear these.
ClientCursor::invalidate( from );
- NamespaceDetailsTransient::clearForPrefix( from );
+ NamespaceDetailsTransient::eraseForPrefix( from );
NamespaceDetails *details = ni->details( from );
ni->add_ns( to, *details );
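eraseForPrefix() above removes the cached entries outright, collecting the matching keys first so the map is never mutated while it is being iterated. The same pattern on a plain std::map (the int value type and the test keys are illustrative):

#include <map>
#include <string>
#include <vector>
#include <cstring>

typedef std::map<std::string, int> Map; // value type is illustrative

// Erase every entry whose key starts with prefix. Matching keys are collected
// first, then erased, so iterators are never invalidated mid-scan.
static void eraseForPrefix(Map& m, const char* prefix) {
    std::vector<std::string> found;
    size_t len = std::strlen(prefix);
    for (Map::const_iterator i = m.begin(); i != m.end(); ++i) {
        if (std::strncmp(i->first.c_str(), prefix, len) == 0)
            found.push_back(i->first);
    }
    for (std::vector<std::string>::const_iterator i = found.begin(); i != found.end(); ++i)
        m.erase(*i);
}

int main() {
    Map m;
    m["test.foo"] = 1;
    m["test.foo.$_id_"] = 2;
    m["other.bar"] = 3;
    eraseForPrefix(m, "test.foo");
    return m.size() == 1 ? 0 : 1; // only "other.bar" remains
}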
diff --git a/db/namespace.h b/db/namespace.h
index 4ec1edd..ef3d04e 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -425,6 +425,7 @@ namespace mongo {
Can be useful as index namespaces share the same start as the regular collection.
SLOW - sequential scan of all NamespaceDetailsTransient objects */
static void clearForPrefix(const char *prefix);
+ static void eraseForPrefix(const char *prefix);
/* indexKeys() cache ---------------------------------------------------- */
/* assumed to be in write lock for this */
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 663ae05..2aedfd4 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -806,6 +806,7 @@ namespace mongo {
result.append("ns", name.c_str());
ClientCursor::invalidate(name.c_str());
Top::global.collectionDropped( name );
+ NamespaceDetailsTransient::eraseForPrefix( name.c_str() );
dropNS(name);
}
diff --git a/db/update.cpp b/db/update.cpp
index e53f2af..8dc6c85 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -1225,8 +1225,7 @@ namespace mongo {
}
}
- if (atomic)
- getDur().commitIfNeeded();
+ getDur().commitIfNeeded();
continue;
}