author     Antonin Kral <a.kral@bobek.cz>    2012-03-30 21:40:47 +0200
committer  Antonin Kral <a.kral@bobek.cz>    2012-03-30 21:40:47 +0200
commit     f4cdad7de83b306b815e03435883794423b7a010 (patch)
tree       3bf78e89033459d7bd6e7a92265a5085308c2b08
parent     4bf70009546d751d87bc4912e9193341429f4b54 (diff)
parent     eaaa7b30c99b89b5483e0a372bb73fe8c8695185 (diff)
Merge tag 'upstream/2.0.4'
Upstream version 2.0.4
-rw-r--r--  SConstruct                    27
-rw-r--r--  buildscripts/utils.py         23
-rw-r--r--  client/connpool.cpp            2
-rw-r--r--  db/dur_journal.cpp             2
-rw-r--r--  db/oplog.cpp                   8
-rw-r--r--  db/ops/query.cpp              14
-rw-r--r--  db/repl/rs.cpp                 9
-rw-r--r--  dbtests/threadedtests.cpp     85
-rw-r--r--  doxygenConfig                  2
-rw-r--r--  jstests/getlog2.js            41
-rw-r--r--  jstests/sharding/shard3.js     8
-rw-r--r--  rpm/mongo.spec                 2
-rw-r--r--  s/d_chunk_manager.cpp          2
-rw-r--r--  s/d_logic.h                    3
-rw-r--r--  s/d_state.cpp                 28
-rw-r--r--  s/shardconnection.cpp          2
-rw-r--r--  shell/collection.js            2
-rw-r--r--  shell/dbshell.cpp              5
-rw-r--r--  util/goodies.h                55
-rw-r--r--  util/version.cpp               2
20 files changed, 261 insertions, 61 deletions
diff --git a/SConstruct b/SConstruct
index dbe95a3..e536f8a 100644
--- a/SConstruct
+++ b/SConstruct
@@ -817,29 +817,6 @@ def add_exe(target):
return target + ".exe"
return target
-def smoke_python_name():
- # if this script is being run by py2.5 or greater,
- # then we assume that "python" points to a 2.5 or
- # greater python VM. otherwise, explicitly use 2.5
- # which we assume to be installed.
- import subprocess
- version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
- binaries = ['python2.5', 'python2.6', 'python2.7', 'python25', 'python26', 'python27', 'python']
- for binary in binaries:
- try:
- # py-2.4 compatible replacement for shell backticks
- out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
- for stream in (out, err):
- match = version.search(stream)
- if match:
- versiontuple = tuple(map(int, match.group(1).split('.')))
- if versiontuple >= (2, 5):
- return binary
- except:
- pass
- # if that all fails, fall back to "python"
- return "python"
-
def setupBuildInfoFile( outFile ):
version = utils.getGitVersion()
if len(moduleNames) > 0:
@@ -1253,7 +1230,7 @@ def addSmoketest( name, deps ):
else:
target = name[5].lower() + name[6:]
- addTest(name, deps, [ smoke_python_name() + " buildscripts/smoke.py " + " ".join(smokeFlags) + ' ' + target ])
+ addTest(name, deps, [ utils.smoke_python_name() + " buildscripts/smoke.py " + " ".join(smokeFlags) + ' ' + target ])
addSmoketest( "smoke", [ add_exe( "test" ) ] )
addSmoketest( "smokePerf", [ "perftest" ] )
@@ -1651,7 +1628,7 @@ def build_and_test_client(env, target, source):
call(scons_command + ["libmongoclient.a", "clientTests"], cwd=installDir)
- return bool(call([smoke_python_name(), "buildscripts/smoke.py",
+ return bool(call([utils.smoke_python_name(), "buildscripts/smoke.py",
"--test-path", installDir, "client"]))
env.Alias("clientBuild", [mongod, installDir], [build_and_test_client])
env.AlwaysBuild("clientBuild")
diff --git a/buildscripts/utils.py b/buildscripts/utils.py
index 8021d87..91409c7 100644
--- a/buildscripts/utils.py
+++ b/buildscripts/utils.py
@@ -134,3 +134,26 @@ def didMongodStart( port=27017 , timeout=20 ):
timeout = timeout - 1
return False
+def smoke_python_name():
+ # if this script is being run by py2.5 or greater,
+ # then we assume that "python" points to a 2.5 or
+ # greater python VM. otherwise, explicitly use 2.5
+ # which we assume to be installed.
+ import subprocess
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ['python2.5', 'python2.6', 'python2.7', 'python25', 'python26', 'python27', 'python']
+ for binary in binaries:
+ try:
+ # py-2.4 compatible replacement for shell backticks
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= (2, 5):
+ return binary
+ except:
+ pass
+ # if that all fails, fall back to "python"
+ return "python"
+
diff --git a/client/connpool.cpp b/client/connpool.cpp
index 94ce4ec..ca3713d 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -405,7 +405,7 @@ namespace mongo {
if ( _conn ) {
if ( ! _conn->isFailed() ) {
/* see done() comments above for why we log this line */
- log() << "~ScopedDbConnection: _conn != null" << endl;
+ log() << "scoped connection to " << _conn->getServerAddress() << " not being returned to the pool" << endl;
}
kill();
}
diff --git a/db/dur_journal.cpp b/db/dur_journal.cpp
index 95a95c9..d7ef58f 100644
--- a/db/dur_journal.cpp
+++ b/db/dur_journal.cpp
@@ -687,7 +687,7 @@ namespace mongo {
// must already be open -- so that _curFileId is correct for previous buffer building
assert( _curLogFile );
- stats.curr->_uncompressedBytes += b.len();
+ stats.curr->_uncompressedBytes += uncompressed.len();
unsigned w = b.len();
_written += w;
assert( w <= L );
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 6e62607..f4521f2 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -629,16 +629,16 @@ namespace mongo {
OplogReader missingObjReader;
const char *ns = o.getStringField("ns");
+ // should already have write lock
+ Client::Context ctx(ns);
+
// capped collections
NamespaceDetails *nsd = nsdetails(ns);
if (nsd && nsd->capped) {
log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
return false;
}
-
- // should already have write lock
- Client::Context ctx(ns);
-
+
// we don't have the object yet, which is possible on initial sync. get it.
log() << "replication info adding missing object" << endl; // rare enough we can log
uassert(15916, str::stream() << "Can no longer connect to initial sync source: " << hn, missingObjReader.connect(hn));
diff --git a/db/ops/query.cpp b/db/ops/query.cpp
index 36f2536..1010912 100644
--- a/db/ops/query.cpp
+++ b/db/ops/query.cpp
@@ -816,6 +816,7 @@ namespace mongo {
curop.debug().ns = ns;
curop.debug().ntoreturn = pq.getNumToReturn();
+ curop.debug().query = jsobj;
curop.setQuery(jsobj);
if ( pq.couldBeCommand() ) {
@@ -916,6 +917,19 @@ namespace mongo {
Client& c = cc();
bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
if ( nsFound == false || indexFound == true ) {
+
+ if ( shardingState.needShardChunkManager( ns ) ) {
+ ShardChunkManagerPtr m = shardingState.getShardChunkManager( ns );
+ if ( m && ! m->belongsToMe( resObject ) ) {
+ // I have something with this _id
+ // but it doesn't belong to me
+ // so return nothing
+ resObject = BSONObj();
+ found = false;
+ }
+ }
+
+
BufBuilder bb(sizeof(QueryResult)+resObject.objsize()+32);
bb.skip(sizeof(QueryResult));
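
A note on the query.cpp hunk above: even when the _id index finds a document, the shard now returns nothing if its chunk manager says the document belongs to another shard (this is what the SERVER-4612 test in jstests/sharding/shard3.js further down exercises). The following is only a schematic C++ sketch of that ownership filter; the types and the ownership test are made-up stand-ins for ShardChunkManager and BSONObj, not the real API.

    #include <cassert>

    // Illustrative stand-ins for the sharding types used above; not the real API.
    struct Document {
        bool empty;
        int shardKey;
        Document() : empty(true), shardKey(0) {}
    };

    struct ChunkOwnership {
        int low, high;                                   // the key range this shard owns
        bool belongsToMe(const Document& d) const { return d.shardKey >= low && d.shardKey < high; }
    };

    // After an _id lookup, drop the result if its shard key falls outside our chunks.
    static bool filterIdLookup(const ChunkOwnership& chunks, Document& result, bool found) {
        if (found && !chunks.belongsToMe(result)) {
            result = Document();                         // return nothing to the client
            found = false;
        }
        return found;
    }

    int main() {
        ChunkOwnership myChunks = { 0, 100 };            // this shard owns keys [0, 100)
        Document d; d.empty = false; d.shardKey = 250;   // found, but owned elsewhere
        bool found = filterIdLookup(myChunks, d, true);
        assert(!found && d.empty);                       // caller sees "no document"
        return 0;
    }
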
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index f827291..23abc24 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -100,17 +100,20 @@ namespace mongo {
lock lk(this);
Member *max = 0;
-
- for (set<unsigned>::iterator it = _electableSet.begin(); it != _electableSet.end(); it++) {
+ set<unsigned>::iterator it = _electableSet.begin();
+ while ( it != _electableSet.end() ) {
const Member *temp = findById(*it);
if (!temp) {
log() << "couldn't find member: " << *it << endl;
- _electableSet.erase(*it);
+ set<unsigned>::iterator it_delete = it;
+ it++;
+ _electableSet.erase(it_delete);
continue;
}
if (!max || max->config().priority < temp->config().priority) {
max = (Member*)temp;
}
+ it++;
}
return max;
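
The rs.cpp hunk above exists because erasing the element a std::set iterator points to and then advancing that same iterator is undefined behaviour; the rewritten loop copies the iterator, advances it, and only then erases. A standalone sketch of the same erase-while-iterating pattern, with illustrative data in place of _electableSet:

    #include <iostream>
    #include <set>

    int main() {
        std::set<unsigned> ids;
        for (unsigned i = 1; i <= 5; i++)
            ids.insert(i);

        std::set<unsigned>::iterator it = ids.begin();
        while (it != ids.end()) {
            if (*it % 2 == 0) {                          // stand-in for "member not found"
                std::set<unsigned>::iterator dead = it;
                it++;                                    // advance first...
                ids.erase(dead);                         // ...then erase; 'it' stays valid
                continue;
            }
            it++;
        }

        for (std::set<unsigned>::iterator i = ids.begin(); i != ids.end(); i++)
            std::cout << *i << " ";                      // prints: 1 3 5
        std::cout << std::endl;
        return 0;
    }
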
diff --git a/dbtests/threadedtests.cpp b/dbtests/threadedtests.cpp
index 3a5ee10..cdee052 100644
--- a/dbtests/threadedtests.cpp
+++ b/dbtests/threadedtests.cpp
@@ -544,6 +544,15 @@ namespace ThreadedTests {
};
#endif
+ void sleepalittle() {
+ Timer t;
+ while( 1 ) {
+ boost::this_thread::yield();
+ if( t.micros() > 8 )
+ break;
+ }
+ }
+
class WriteLocksAreGreedy : public ThreadedTest<3> {
public:
WriteLocksAreGreedy() : m("gtest") {}
@@ -579,6 +588,81 @@ namespace ThreadedTests {
}
};
+ // Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but only
+ // max _nRooms threads should ever get in at once
+ class TicketHolderWaits : public ThreadedTest<10> {
+
+ static const int checkIns = 1000;
+ static const int rooms = 3;
+
+ public:
+ TicketHolderWaits() : _hotel( rooms ), _tickets( _hotel._nRooms ) {}
+
+ private:
+
+ class Hotel {
+ public:
+ Hotel( int nRooms ) : _frontDesk( "frontDesk" ), _nRooms( nRooms ), _checkedIn( 0 ), _maxRooms( 0 ) {}
+
+ void checkIn(){
+ scoped_lock lk( _frontDesk );
+ _checkedIn++;
+ assert( _checkedIn <= _nRooms );
+ if( _checkedIn > _maxRooms ) _maxRooms = _checkedIn;
+ }
+
+ void checkOut(){
+ scoped_lock lk( _frontDesk );
+ _checkedIn--;
+ assert( _checkedIn >= 0 );
+ }
+
+ mongo::mutex _frontDesk;
+ int _nRooms;
+ int _checkedIn;
+ int _maxRooms;
+ };
+
+ Hotel _hotel;
+ TicketHolder _tickets;
+
+ virtual void subthread(int x) {
+
+ string threadName = ( str::stream() << "ticketHolder" << x );
+ Client::initThread( threadName.c_str() );
+
+ for( int i = 0; i < checkIns; i++ ){
+
+ _tickets.waitForTicket();
+ TicketHolderReleaser whenDone( &_tickets );
+
+ _hotel.checkIn();
+
+ sleepalittle();
+ if( i == checkIns - 1 ) sleepsecs( 2 );
+
+ _hotel.checkOut();
+
+ if( ( i % ( checkIns / 10 ) ) == 0 )
+ log() << "checked in " << i << " times..." << endl;
+
+ }
+
+ cc().shutdown();
+
+ }
+
+ virtual void validate() {
+
+ // This should always be true, assuming that it takes < 1 sec for the hardware to process a check-out/check-in
+ // Time for test is then ~ #threads / _nRooms * 2 seconds
+ assert( _hotel._maxRooms == _hotel._nRooms );
+
+ }
+
+ };
+
+
class All : public Suite {
public:
All() : Suite( "threading" ) { }
@@ -600,6 +684,7 @@ namespace ThreadedTests {
add< RWLockTest4 >();
add< MongoMutexTest >();
+ add< TicketHolderWaits >();
}
} myall;
}
diff --git a/doxygenConfig b/doxygenConfig
index 1fbcce0..55cee20 100644
--- a/doxygenConfig
+++ b/doxygenConfig
@@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
-PROJECT_NUMBER = 2.0.3
+PROJECT_NUMBER = 2.0.4
OUTPUT_DIRECTORY = docs/doxygen
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
diff --git a/jstests/getlog2.js b/jstests/getlog2.js
new file mode 100644
index 0000000..3f8a8f8
--- /dev/null
+++ b/jstests/getlog2.js
@@ -0,0 +1,41 @@
+// tests getLog as well as slow query logging
+
+glcol = db.getLogTest2;
+glcol.drop()
+
+contains = function(arr, func) {
+ var i = arr.length;
+ while (i--) {
+ if (func(arr[i])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// test doesn't work when talking to mongos
+if(db.isMaster().msg != "isdbgrid") {
+ // run a slow query
+ glcol.save({ "SENTINEL": 1 });
+ glcol.findOne({ "SENTINEL": 1, "$where": function() { sleep(1000); return true; } });
+
+ // run a slow update
+ glcol.update({ "SENTINEL": 1, "$where": function() { sleep(1000); return true; } }, { "x": "x" });
+
+ var resp = db.adminCommand({getLog:"global"});
+ assert( resp.ok == 1, "error executing getLog command" );
+ assert( resp.log, "no log field" );
+ assert( resp.log.length > 0 , "no log lines" );
+
+ // ensure that slow query is logged in detail
+ assert( contains(resp.log, function(v) {
+ print(v);
+ return v.indexOf(" query ") != -1 && v.indexOf("query:") != -1 && v.indexOf("SENTINEL") != -1;
+ }) );
+
+ // same, but for update
+ assert( contains(resp.log, function(v) {
+ print(v);
+ return v.indexOf(" update ") != -1 && v.indexOf("query:") != -1 && v.indexOf("SENTINEL") != -1;
+ }) );
+}
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index e27316e..8ee566e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -61,7 +61,7 @@ function doCounts( name , total , onlyItCounts ){
}
var total = doCounts( "before wrong save" )
-secondary.save( { num : -3 } );
+secondary.save( { _id : 111 , num : -3 } );
printjson( secondary.getDB().getLastError() )
doCounts( "after wrong save" , total , true )
e = a.find().explain();
@@ -69,6 +69,12 @@ assert.eq( 3 , e.n , "ex1" )
assert.eq( 4 , e.nscanned , "ex2" )
assert.eq( 1 , e.nChunkSkips , "ex3" )
+
+// SERVER-4612
+// make sure idhack obeys chunks
+x = a.findOne( { _id : 111 } )
+assert( ! x , "idhack didn't obey chunk boundaries " + tojson(x) );
+
// --- move all to 1 ---
print( "MOVE ALL TO 1" );
diff --git a/rpm/mongo.spec b/rpm/mongo.spec
index 995310b..4d73803 100644
--- a/rpm/mongo.spec
+++ b/rpm/mongo.spec
@@ -1,5 +1,5 @@
Name: mongo
-Version: 2.0.3
+Version: 2.0.4
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0
diff --git a/s/d_chunk_manager.cpp b/s/d_chunk_manager.cpp
index 82a06f6..351df15 100644
--- a/s/d_chunk_manager.cpp
+++ b/s/d_chunk_manager.cpp
@@ -40,7 +40,7 @@ namespace mongo {
conn = direct.get();
}
else {
- scoped.reset( new ScopedDbConnection( configServer ) );
+ scoped.reset( new ScopedDbConnection( configServer, 30.0 ) );
conn = scoped->get();
}
diff --git a/s/d_logic.h b/s/d_logic.h
index d96f937..dca0ee1 100644
--- a/s/d_logic.h
+++ b/s/d_logic.h
@@ -147,6 +147,9 @@ namespace mongo {
// protects state below
mutable mongo::mutex _mutex;
+ // protects accessing the config server
+ // Using a ticket holder so we can have multiple redundant tries at any given time
+ mutable TicketHolder _configServerTickets;
// map from a namespace into the ensemble of chunk ranges that are stored in this mongod
// a ShardChunkManager carries all state we need for a collection at this shard, including its version information
diff --git a/s/d_state.cpp b/s/d_state.cpp
index 929d2a8..638d8c1 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -45,7 +45,8 @@ namespace mongo {
// -----ShardingState START ----
ShardingState::ShardingState()
- : _enabled(false) , _mutex( "ShardingState" ) {
+ : _enabled(false) , _mutex( "ShardingState" ),
+ _configServerTickets( 3 /* max number of concurrent config server refresh threads */ ) {
}
void ShardingState::enable( const string& server ) {
@@ -183,7 +184,23 @@ namespace mongo {
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {
- // fast path - requested version is at the same version as this chunk manager
+ // Currently this function is called after a getVersion(), which is the first "check", and the assumption here
+ // is that we don't do anything nearly as long as a remote query in a thread between then and now.
+ // Otherwise it may be worth adding an additional check without the _configServerMutex below, since then it
+ // would be likely that the version may have changed in the meantime without waiting for or fetching config results.
+
+ // TODO: Mutex-per-namespace?
+
+ LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;
+
+ _configServerTickets.waitForTicket();
+ TicketHolderReleaser needTicketFrom( &_configServerTickets );
+
+ // fast path - double-check if requested version is at the same version as this chunk manager before verifying
+ // against config server
+ //
+ // This path will short-circuit the version set if another thread already managed to update the version in the
+ // meantime. First check is from getVersion().
//
// cases:
// + this shard updated the version for a migrate's commit (FROM side)
@@ -191,12 +208,15 @@ namespace mongo {
// + two clients reloaded
// one triggered the 'slow path' (below)
// when the second's request gets here, the version is already current
+ ConfigVersion storedVersion;
{
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
- if ( it != _chunks.end() && it->second->getVersion() == version )
+ if ( it != _chunks.end() && ( storedVersion = it->second->getVersion() ) == version )
return true;
}
+
+ LOG( 2 ) << "verifying cached version " << storedVersion.toString() << " and new version " << version.toString() << " for '" << ns << "'" << endl;
// slow path - requested version is different than the current chunk manager's, if one exists, so must check for
// newest version in the config server
@@ -209,8 +229,10 @@ namespace mongo {
// the secondary had no state (managers) at all, so every client request will fall here
// + a stale client request a version that's not current anymore
+ // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName ) );
+
{
scoped_lock lk( _mutex );
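
The trySetVersion() changes above combine two ideas: at most three threads may talk to the config server at once (the new _configServerTickets), and each thread re-checks the cached version before paying for the slow refresh, since another thread may have refreshed it while this one waited. The sketch below shows only the re-check-then-refresh shape with a mutex-guarded cache; the ticket throttle itself is sketched after the util/goodies.h hunk further down, and the cache map and fetchVersionFromConfigServer() here are illustrative stand-ins, not the real ShardingState API.

    #include <map>
    #include <mutex>
    #include <string>

    static std::mutex cacheMutex;
    static std::map<std::string, int> versionCache;          // ns -> last version we installed

    // stand-in for the remote config-server lookup (the slow path)
    static int fetchVersionFromConfigServer(const std::string& /*ns*/) { return 42; }

    static bool trySetVersionSketch(const std::string& ns, int& version) {
        // fast path: another thread may already have installed the requested version
        {
            std::lock_guard<std::mutex> lk(cacheMutex);
            std::map<std::string, int>::const_iterator it = versionCache.find(ns);
            if (it != versionCache.end() && it->second == version)
                return true;
        }

        // slow path: refresh without holding the cache lock, then install the result
        int latest = fetchVersionFromConfigServer(ns);
        std::lock_guard<std::mutex> lk(cacheMutex);
        versionCache[ns] = latest;
        version = latest;
        return true;
    }

    int main() {
        int v = 7;
        trySetVersionSketch("test.foo", v);                  // v becomes 42 via the slow path
        return 0;
    }
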
diff --git a/s/shardconnection.cpp b/s/shardconnection.cpp
index 89b3839..471e37b 100644
--- a/s/shardconnection.cpp
+++ b/s/shardconnection.cpp
@@ -266,7 +266,7 @@ namespace mongo {
if ( _conn ) {
if ( ! _conn->isFailed() ) {
/* see done() comments above for why we log this line */
- log() << "~ScopedDBConnection: _conn != null" << endl;
+ log() << "sharded connection to " << _conn->getServerAddress() << " not being returned to the pool" << endl;
}
kill();
}
diff --git a/shell/collection.js b/shell/collection.js
index cb7035d..1b8e488 100644
--- a/shell/collection.js
+++ b/shell/collection.js
@@ -703,7 +703,7 @@ DBCollection.prototype.getShardDistribution = function(){
var shardStats = stats.shards[ shard ]
- var chunks = config.chunks.find({ _id : sh._collRE( coll ), shard : shard }).toArray()
+ var chunks = config.chunks.find({ _id : sh._collRE( this ), shard : shard }).toArray()
numChunks += chunks.length
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 443973f..34f2a34 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -126,8 +126,11 @@ void shellHistoryAdd( const char * line ) {
return;
lastLine = line;
- if ((strstr(line, ".auth")) == NULL)
+ if ( strstr( line, ".auth") == NULL &&
+ strstr( line, ".addUser") == NULL )
+ {
linenoiseHistoryAdd( line );
+ }
#endif
}
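
The dbshell.cpp hunk above keeps any line containing .auth or .addUser out of the interactive shell's command history so credentials are not persisted. A minimal sketch of that filter, with addToHistory() as a hypothetical stand-in for linenoiseHistoryAdd():

    #include <cstdio>
    #include <cstring>

    // stand-in for linenoiseHistoryAdd()
    static void addToHistory(const char* line) { std::printf("history += %s\n", line); }

    static void shellHistoryAddSketch(const char* line) {
        // skip anything that looks like it carries credentials
        if (std::strstr(line, ".auth") == NULL &&
            std::strstr(line, ".addUser") == NULL) {
            addToHistory(line);
        }
    }

    int main() {
        shellHistoryAddSketch("db.foo.find()");              // recorded
        shellHistoryAddSketch("db.auth('u', 'secret')");     // skipped
        return 0;
    }
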
diff --git a/util/goodies.h b/util/goodies.h
index 65bfbab..735aa8b 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -324,31 +324,41 @@ namespace mongo {
bool tryAcquire() {
scoped_lock lk( _mutex );
- if ( _num <= 0 ) {
- if ( _num < 0 ) {
- cerr << "DISASTER! in TicketHolder" << endl;
- }
- return false;
+ return _tryAcquire();
+ }
+
+ void waitForTicket() {
+ scoped_lock lk( _mutex );
+
+ while( ! _tryAcquire() ) {
+ _newTicket.wait( lk.boost() );
}
- _num--;
- return true;
}
void release() {
- scoped_lock lk( _mutex );
- _num++;
+ {
+ scoped_lock lk( _mutex );
+ _num++;
+ }
+ _newTicket.notify_one();
}
void resize( int newSize ) {
- scoped_lock lk( _mutex );
- int used = _outof - _num;
- if ( used > newSize ) {
- cout << "ERROR: can't resize since we're using (" << used << ") more than newSize(" << newSize << ")" << endl;
- return;
+ {
+ scoped_lock lk( _mutex );
+
+ int used = _outof - _num;
+ if ( used > newSize ) {
+ cout << "ERROR: can't resize since we're using (" << used << ") more than newSize(" << newSize << ")" << endl;
+ return;
+ }
+
+ _outof = newSize;
+ _num = _outof - used;
}
- _outof = newSize;
- _num = _outof - used;
+ // Potentially wasteful, but easier to see that it's correct
+ _newTicket.notify_all();
}
int available() const {
@@ -362,9 +372,22 @@ namespace mongo {
int outof() const { return _outof; }
private:
+
+ bool _tryAcquire(){
+ if ( _num <= 0 ) {
+ if ( _num < 0 ) {
+ cerr << "DISASTER! in TicketHolder" << endl;
+ }
+ return false;
+ }
+ _num--;
+ return true;
+ }
+
int _outof;
int _num;
mongo::mutex _mutex;
+ boost::condition_variable_any _newTicket;
};
class TicketHolderReleaser {
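
The util/goodies.h hunk turns TicketHolder from a try-only counter into a blocking one: waitForTicket() sleeps on a condition variable until release() hands a ticket back, and release()/resize() notify outside the mutex. Below is a standalone sketch of that shape using std::mutex and std::condition_variable in place of mongo::mutex and boost::condition_variable_any (class and member names are illustrative); the main() loosely mirrors the TicketHolderWaits "hotel" test from dbtests/threadedtests.cpp above, checking that no more threads than tickets are ever inside at once.

    #include <cassert>
    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    class SimpleTicketHolder {
    public:
        explicit SimpleTicketHolder(int num) : _num(num) {}

        // Block until a ticket is free, then take it.
        void waitForTicket() {
            std::unique_lock<std::mutex> lk(_mutex);
            while (_num <= 0)
                _newTicket.wait(lk);
            _num--;
        }

        // Non-blocking variant: take a ticket only if one is free.
        bool tryAcquire() {
            std::lock_guard<std::mutex> lk(_mutex);
            if (_num <= 0)
                return false;
            _num--;
            return true;
        }

        // Hand the ticket back and wake one waiter; notify outside the lock,
        // mirroring the release() change above.
        void release() {
            {
                std::lock_guard<std::mutex> lk(_mutex);
                _num++;
            }
            _newTicket.notify_one();
        }

    private:
        int _num;
        std::mutex _mutex;
        std::condition_variable _newTicket;
    };

    int main() {
        SimpleTicketHolder tickets(3);          // same shape as the dbtests "hotel" above
        std::mutex deskMutex;
        int checkedIn = 0, maxSeen = 0;

        std::vector<std::thread> guests;
        for (int t = 0; t < 10; t++) {
            guests.push_back(std::thread([&]() {
                for (int i = 0; i < 100; i++) {
                    tickets.waitForTicket();
                    {
                        std::lock_guard<std::mutex> lk(deskMutex);
                        checkedIn++;
                        if (checkedIn > maxSeen) maxSeen = checkedIn;
                    }
                    {
                        std::lock_guard<std::mutex> lk(deskMutex);
                        checkedIn--;
                    }
                    tickets.release();
                }
            }));
        }
        for (size_t i = 0; i < guests.size(); i++) guests[i].join();

        assert(maxSeen <= 3);                   // never more occupants than tickets
        return 0;
    }
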
diff --git a/util/version.cpp b/util/version.cpp
index 883c208..c644e75 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -38,7 +38,7 @@ namespace mongo {
* 1.2.3-rc4-pre-
* If you really need to do something else you'll need to fix _versionArray()
*/
- const char versionString[] = "2.0.3";
+ const char versionString[] = "2.0.4";
// See unit test for example outputs
static BSONArray _versionArray(const char* version){