Diffstat (limited to 's')
47 files changed, 3475 insertions, 1784 deletions
diff --git a/s/balance.cpp b/s/balance.cpp index 8b01ea7..d4bba1e 100644 --- a/s/balance.cpp +++ b/s/balance.cpp @@ -34,10 +34,9 @@ namespace mongo { Balancer balancer; - Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy ) {} + Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy() ) {} Balancer::~Balancer() { - delete _policy; } int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) { @@ -74,7 +73,7 @@ namespace mongo { } // the move requires acquiring the collection metadata's lock, which can fail - log() << "balacer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to + log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to << " chunk: " << chunkToMove << endl; if ( res["chunkTooBig"].trueValue() ) { @@ -156,7 +155,7 @@ namespace mongo { cursor.reset(); if ( collections.empty() ) { - log(1) << "no collections to balance" << endl; + LOG(1) << "no collections to balance" << endl; return; } @@ -171,7 +170,7 @@ namespace mongo { vector<Shard> allShards; Shard::getAllShards( allShards ); if ( allShards.size() < 2) { - log(1) << "can't balance without more active shards" << endl; + LOG(1) << "can't balance without more active shards" << endl; return; } @@ -206,7 +205,7 @@ namespace mongo { cursor.reset(); if (shardToChunksMap.empty()) { - log(1) << "skipping empty collection (" << ns << ")"; + LOG(1) << "skipping empty collection (" << ns << ")"; continue; } @@ -245,9 +244,8 @@ namespace mongo { return true; } - catch ( std::exception& ) { - - log( LL_WARNING ) << "could not initialize balancer, please check that all shards and config servers are up" << endl; + catch ( std::exception& e ) { + warning() << "could not initialize balancer, please check that all shards and config servers are up: " << e.what() << endl; return false; } @@ -267,7 +265,7 @@ namespace mongo { break; } - // getConnectioString and the constructor of a DistributedLock do not throw, which is what we expect on while + // getConnectionString and the dist lock constructor do not throw, which is what we expect while // on the balancer thread ConnectionString config = configServer.getConnectionString(); DistributedLock balanceLock( config , "balancer" ); @@ -283,7 +281,7 @@ namespace mongo { // now make sure we should even be running if ( ! grid.shouldBalance() ) { - log(1) << "skipping balancing round because balancing is disabled" << endl; + LOG(1) << "skipping balancing round because balancing is disabled" << endl; conn.done(); sleepsecs( 30 ); @@ -295,41 +293,45 @@ namespace mongo { // use fresh shard state Shard::reloadShardInfo(); - dist_lock_try lk( &balanceLock , "doing balance round" ); - if ( ! lk.got() ) { - log(1) << "skipping balancing round because another balancer is active" << endl; - conn.done(); - - sleepsecs( 30 ); // no need to wake up soon - continue; - } - - log(1) << "*** start balancing round" << endl; - - vector<CandidateChunkPtr> candidateChunks; - _doBalanceRound( conn.conn() , &candidateChunks ); - if ( candidateChunks.size() == 0 ) { - log(1) << "no need to move any chunk" << endl; - } - else { - _balancedLastTime = _moveChunks( &candidateChunks ); + { + dist_lock_try lk( &balanceLock , "doing balance round" ); + if ( !
lk.got() ) { + LOG(1) << "skipping balancing round because another balancer is active" << endl; + conn.done(); + + sleepsecs( 30 ); // no need to wake up soon + continue; + } + + LOG(1) << "*** start balancing round" << endl; + + vector<CandidateChunkPtr> candidateChunks; + _doBalanceRound( conn.conn() , &candidateChunks ); + if ( candidateChunks.size() == 0 ) { + LOG(1) << "no need to move any chunk" << endl; + } + else { + _balancedLastTime = _moveChunks( &candidateChunks ); + } + + LOG(1) << "*** end of balancing round" << endl; } - - log(1) << "*** end of balancing round" << endl; + conn.done(); - + sleepsecs( _balancedLastTime ? 5 : 10 ); } catch ( std::exception& e ) { log() << "caught exception while doing balance: " << e.what() << endl; // Just to match the opening statement if in log level 1 - log(1) << "*** End of balancing round" << endl; + LOG(1) << "*** End of balancing round" << endl; sleepsecs( 30 ); // sleep a fair amount b/c of error continue; } } + } } // namespace mongo diff --git a/s/balance.h b/s/balance.h index 0ad2647..6875996 100644 --- a/s/balance.h +++ b/s/balance.h @@ -59,8 +59,8 @@ namespace mongo { int _balancedLastTime; // decide which chunks to move; owned here. - BalancerPolicy* _policy; - + scoped_ptr<BalancerPolicy> _policy; + /** * Checks that the balancer can connect to all servers it needs to do its job. * diff --git a/s/balancer_policy.cpp b/s/balancer_policy.cpp index 482fab0..f1b4bf1 100644 --- a/s/balancer_policy.cpp +++ b/s/balancer_policy.cpp @@ -53,6 +53,7 @@ namespace mongo { const bool draining = isDraining( shardLimits ); const bool opsQueued = hasOpsQueued( shardLimits ); + // Is this shard a better chunk receiver than the current one? // Shards that would be bad receiver candidates: // + maxed out shards @@ -64,6 +65,13 @@ namespace mongo { min = make_pair( shard , size ); } } + else if ( opsQueued ) { + LOG(1) << "won't send a chunk to: " << shard << " because it has ops queued" << endl; + } + else if ( maxedOut ) { + LOG(1) << "won't send a chunk to: " << shard << " because it is maxedOut" << endl; + } + // Check whether this shard is a better chunk donor than the current one. // Draining shards take a lower priority than overloaded shards. @@ -79,7 +87,7 @@ namespace mongo { // If there is no candidate chunk receiver -- they may have all been maxed out, // draining, ... -- there's not much that the policy can do. if ( min.second == numeric_limits<unsigned>::max() ) { - log() << "no availalable shards to take chunks" << endl; + log() << "no available shards to take chunks" << endl; return NULL; } @@ -88,13 +96,13 @@ namespace mongo { return NULL; } - log(1) << "collection : " << ns << endl; - log(1) << "donor : " << max.second << " chunks on " << max.first << endl; - log(1) << "receiver : " << min.second << " chunks on " << min.first << endl; + LOG(1) << "collection : " << ns << endl; + LOG(1) << "donor : " << max.second << " chunks on " << max.first << endl; + LOG(1) << "receiver : " << min.second << " chunks on " << min.first << endl; if ( ! drainingShards.empty() ) { string drainingStr; joinStringDelim( drainingShards, &drainingStr, ',' ); - log(1) << "draining : " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl; + LOG(1) << "draining : " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl; } // Solving imbalances takes a higher priority than draining shards.
Many shards can @@ -126,7 +134,7 @@ namespace mongo { } BSONObj BalancerPolicy::pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to ) { - // It is possible for a donor ('from') shard to have less chunks than a recevier one ('to') + // It is possible for a donor ('from') shard to have less chunks than a receiver one ('to') // if the donor is in draining mode. if ( to.size() == 0 ) diff --git a/s/chunk.cpp b/s/chunk.cpp index 2d0ad5d..09dc994 100644 --- a/s/chunk.cpp +++ b/s/chunk.cpp @@ -19,8 +19,10 @@ #include "pch.h" #include "../client/connpool.h" +#include "../db/querypattern.h" #include "../db/queryutil.h" #include "../util/unittest.h" +#include "../util/timer.h" #include "chunk.h" #include "config.h" @@ -48,17 +50,34 @@ namespace mongo { int Chunk::MaxObjectPerChunk = 250000; - Chunk::Chunk( ChunkManager * manager ) : _manager(manager), _lastmod(0) { - _setDataWritten(); - } + Chunk::Chunk(const ChunkManager * manager, BSONObj from) + : _manager(manager), _lastmod(0), _dataWritten(mkDataWritten()) + { + string ns = from.getStringField( "ns" ); + _shard.reset( from.getStringField( "shard" ) ); + + _lastmod = from["lastmod"]; + assert( _lastmod > 0 ); - Chunk::Chunk(ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard) - : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0) { - _setDataWritten(); + _min = from.getObjectField( "min" ).getOwned(); + _max = from.getObjectField( "max" ).getOwned(); + + uassert( 10170 , "Chunk needs a ns" , ! ns.empty() ); + uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() ); + + uassert( 10171 , "Chunk needs a server" , _shard.ok() ); + + uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() ); + uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() ); } - void Chunk::_setDataWritten() { - _dataWritten = rand() % ( MaxChunkSize / 5 ); + + Chunk::Chunk(const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard) + : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0), _dataWritten(mkDataWritten()) + {} + + long Chunk::mkDataWritten() { + return rand() % ( MaxChunkSize / 5 ); } string Chunk::getns() const { @@ -175,7 +194,7 @@ namespace mongo { conn.done(); } - bool Chunk::singleSplit( bool force , BSONObj& res , ChunkPtr* low, ChunkPtr* high) { + BSONObj Chunk::singleSplit( bool force , BSONObj& res ) const { vector<BSONObj> splitPoint; // if splitting is not obligatory we may return early if there are not enough data @@ -189,8 +208,8 @@ namespace mongo { // no split points means there isn't enough data to split on // 1 split point means we have between half the chunk size to full chunk size // so we shouldn't split - log(1) << "chunk not full enough to trigger auto-split" << endl; - return false; + LOG(1) << "chunk not full enough to trigger auto-split" << endl; + return BSONObj(); } splitPoint.push_back( candidates.front() ); @@ -228,24 +247,16 @@ namespace mongo { if ( splitPoint.empty() || _min == splitPoint.front() || _max == splitPoint.front() ) { log() << "want to split chunk, but can't find split point chunk " << toString() << " got: " << ( splitPoint.empty() ? 
"<empty>" : splitPoint.front().toString() ) << endl; - return false; - } - - if (!multiSplit( splitPoint , res , true )) - return false; - - if (low && high) { - low->reset( new Chunk(_manager, _min, splitPoint[0], _shard)); - high->reset(new Chunk(_manager, splitPoint[0], _max, _shard)); - } - else { - assert(!low && !high); // can't have one without the other + return BSONObj(); } - - return true; + + if (multiSplit( splitPoint , res )) + return splitPoint.front(); + else + return BSONObj(); } - bool Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res , bool resetIfSplit) { + bool Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res ) const { const size_t maxSplitPoints = 8192; uassert( 10165 , "can't split as shard doesn't have a manager" , _manager ); @@ -270,24 +281,22 @@ namespace mongo { warning() << "splitChunk failed - cmd: " << cmdObj << " result: " << res << endl; conn.done(); - // reloading won't stricly solve all problems, e.g. the collection's metdata lock can be taken - // but we issue here so that mongos may refresh wihtout needing to be written/read against - grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true); + // reloading won't strictly solve all problems, e.g. the collection's metadata lock can be taken + // but we issue here so that mongos may refresh without needing to be written/read against + _manager->reload(); return false; } conn.done(); - - if ( resetIfSplit ) { - // force reload of chunks - grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true); - } + + // force reload of config + _manager->reload(); return true; } - bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) { + bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) const { uassert( 10167 , "can't move shard to its current location!" 
, getShard() != to ); log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") " << _shard.toString() << " -> " << to.toString() << endl; @@ -311,15 +320,17 @@ namespace mongo { fromconn.done(); + log( worked ) << "moveChunk result: " << res << endl; + // if succeeded, needs to reload to pick up the new location // if failed, mongos may be stale // reload is excessive here as the failure could be simply because collection metadata is taken - grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true); + _manager->reload(); return worked; } - bool Chunk::splitIfShould( long dataWritten ) { + bool Chunk::splitIfShould( long dataWritten ) const { LastError::Disabled d( lastError.get() ); try { @@ -332,28 +343,63 @@ namespace mongo { if ( _dataWritten < splitThreshold / 5 ) return false; - log(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold << endl; + // this is a bit ugly + // we need it so that mongos blocks for the writes to actually be committed + // this does mean mongos has more back pressure than mongod alone + // since it's not 100% tcp queue bound + // this was implicit before since we did a splitVector on the same socket + ShardConnection::sync(); + + LOG(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold << endl; _dataWritten = 0; // reset so we check often enough BSONObj res; - ChunkPtr low; - ChunkPtr high; - bool worked = singleSplit( false /* does not force a split if not enough data */ , res , &low, &high); - if ( !worked ) { + BSONObj splitPoint = singleSplit( false /* does not force a split if not enough data */ , res ); + if ( splitPoint.isEmpty() ) { // singleSplit would have issued a message if we got here _dataWritten = 0; // this means there wasn't enough data to split, so don't want to try again until considerably more data return false; } log() << "autosplitted " << _manager->getns() << " shard: " << toString() - << " on: " << low->getMax() << "(splitThreshold " << splitThreshold << ")" + << " on: " << splitPoint << "(splitThreshold " << splitThreshold << ")" #ifdef _DEBUG - << " size: " << getPhysicalSize() // slow - but can be usefule when debugging + << " size: " << getPhysicalSize() // slow - but can be useful when debugging #endif << endl; - low->moveIfShould( high ); + BSONElement shouldMigrate = res["shouldMigrate"]; // not in mongod < 1.9.1 but that is ok + if (!shouldMigrate.eoo() && grid.shouldBalance()){ + BSONObj range = shouldMigrate.embeddedObject(); + BSONObj min = range["min"].embeddedObject(); + BSONObj max = range["max"].embeddedObject(); + + Shard newLocation = Shard::pick( getShard() ); + if ( getShard() == newLocation ) { + // if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard). + LOG(1) << "recently split chunk: " << range << " already in the best shard: " << getShard() << endl; + return true; // we did split even if we didn't migrate + } + + ChunkManagerPtr cm = _manager->reload(false/*just reloaded in multisplit*/); + ChunkPtr toMove = cm->findChunk(min); + + if ( !
(toMove->getMin() == min && toMove->getMax() == max) ){ + LOG(1) << "recently split chunk: " << range << " modified before we could migrate " << toMove << endl; + return true; + } + + log() << "moving chunk (auto): " << toMove << " to: " << newLocation.toString() << endl; + + BSONObj res; + massert( 10412 , + str::stream() << "moveAndCommit failed: " << res , + toMove->moveAndCommit( newLocation , MaxChunkSize , res ) ); + + // update our config + _manager->reload(); + } return true; @@ -365,40 +411,6 @@ namespace mongo { } } - bool Chunk::moveIfShould( ChunkPtr newChunk ) { - ChunkPtr toMove; - - if ( newChunk->countObjects(2) <= 1 ) { - toMove = newChunk; - } - else if ( this->countObjects(2) <= 1 ) { - DEV assert( shared_from_this() ); - toMove = shared_from_this(); - } - else { - // moving middle shards is handled by balancer - return false; - } - - assert( toMove ); - - Shard newLocation = Shard::pick( getShard() ); - if ( getShard() == newLocation ) { - // if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard). - log(1) << "recently split chunk: " << toString() << "already in the best shard" << endl; - return 0; - } - - log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation.toString() << " #objects: " << toMove->countObjects() << endl; - - BSONObj res; - massert( 10412 , - str::stream() << "moveAndCommit failed: " << res , - toMove->moveAndCommit( newLocation , MaxChunkSize , res ) ); - - return true; - } - long Chunk::getPhysicalSize() const { ScopedDbConnection conn( getShard().getConnString() ); @@ -416,24 +428,7 @@ namespace mongo { return (long)result["size"].number(); } - int Chunk::countObjects(int maxCount) const { - static const BSONObj fields = BSON("_id" << 1 ); - - ShardConnection conn( getShard() , _manager->getns() ); - - // not using regular count as this is more flexible and supports $min/$max - Query q = Query().minKey(_min).maxKey(_max); - int n; - { - auto_ptr<DBClientCursor> c = conn->query(_manager->getns(), q, maxCount, 0, &fields); - assert( c.get() ); - n = c->itcount(); - } - conn.done(); - return n; - } - - void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) { + void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) const { BSONObjBuilder bb( b.subobjStart( name ) ); bb.append( "min" , _min ); bb.append( "max" , _max ); @@ -481,33 +476,6 @@ namespace mongo { return buf.str(); } - void Chunk::unserialize(const BSONObj& from) { - string ns = from.getStringField( "ns" ); - _shard.reset( from.getStringField( "shard" ) ); - - _lastmod = from["lastmod"]; - assert( _lastmod > 0 ); - - BSONElement e = from["minDotted"]; - - if (e.eoo()) { - _min = from.getObjectField( "min" ).getOwned(); - _max = from.getObjectField( "max" ).getOwned(); - } - else { // TODO delete this case after giving people a chance to migrate - _min = e.embeddedObject().getOwned(); - _max = from.getObjectField( "maxDotted" ).getOwned(); - } - - uassert( 10170 , "Chunk needs a ns" , ! ns.empty() ); - uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() ); - - uassert( 10171 , "Chunk needs a server" , _shard.ok() ); - - uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() ); - uassert( 10173 , "Chunk needs a max" , ! 
_max.isEmpty() ); - } - string Chunk::toString() const { stringstream ss; ss << "ns:" << _manager->getns() << " at: " << _shard.toString() << " lastmod: " << _lastmod.toString() << " min: " << _min << " max: " << _max; @@ -523,57 +491,63 @@ namespace mongo { AtomicUInt ChunkManager::NextSequenceNumber = 1; ChunkManager::ChunkManager( string ns , ShardKeyPattern pattern , bool unique ) : - _ns( ns ) , _key( pattern ) , _unique( unique ) , _lock("rw:ChunkManager"), - _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns ) { - _reload_inlock(); // will set _sequenceNumber - } - - ChunkManager::~ChunkManager() { - _chunkMap.clear(); - _chunkRanges.clear(); - _shards.clear(); - } + _ns( ns ) , _key( pattern ) , _unique( unique ) , _chunkRanges(), _mutex("ChunkManager"), + _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns ), - void ChunkManager::_reload() { - rwlock lk( _lock , true ); - _reload_inlock(); - } + // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager's. + // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to + // the most up to date value. + _sequenceNumber(++NextSequenceNumber) - void ChunkManager::_reload_inlock() { + { int tries = 3; while (tries--) { - _chunkMap.clear(); - _chunkRanges.clear(); - _shards.clear(); - _load(); - - if (_isValid()) { - _chunkRanges.reloadAll(_chunkMap); - - // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager's. - // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to - // the most up to date value. - _sequenceNumber = ++NextSequenceNumber; + ChunkMap chunkMap; + set<Shard> shards; + ShardVersionMap shardVersions; + Timer t; + _load(chunkMap, shards, shardVersions); + { + int ms = t.millis(); + log() << "ChunkManager: time to load chunks for " << ns << ": " << ms << "ms" + << " sequenceNumber: " << _sequenceNumber + << " version: " << _version.toString() + << endl; + } + if (_isValid(chunkMap)) { + // These variables are const for thread-safety. Since the + // constructor can only be called from one thread, we don't have + // to worry about that here. + const_cast<ChunkMap&>(_chunkMap).swap(chunkMap); + const_cast<set<Shard>&>(_shards).swap(shards); + const_cast<ShardVersionMap&>(_shardVersions).swap(shardVersions); + const_cast<ChunkRangeManager&>(_chunkRanges).reloadAll(_chunkMap); return; } - + if (_chunkMap.size() < 10) { _printChunks(); } + + warning() << "ChunkManager loaded an invalid config, trying again" << endl; sleepmillis(10 * (3-tries)); } + // this will abort construction so we should never have a reference to an invalid config msgasserted(13282, "Couldn't load a valid config for " + _ns + " after 3 attempts. Please try again."); + } + ChunkManagerPtr ChunkManager::reload(bool force) const { + return grid.getDBConfig(getns())->getChunkManager(getns(), force); } - void ChunkManager::_load() { + void ChunkManager::_load(ChunkMap& chunkMap, set<Shard>& shards, ShardVersionMap& shardVersions) { ScopedDbConnection conn( configServer.modelServer() ); // TODO really need the sort? - auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",1), 0, 0, 0, 0, + auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",-1), 0, 0, 0, 0, (DEBUG_BUILD ? 2 : 1000000)); // batch size. 
Try to induce potential race conditions in debug builds assert( cursor.get() ); while ( cursor->more() ) { @@ -582,28 +556,36 @@ namespace mongo { continue; } - ChunkPtr c( new Chunk( this ) ); - c->unserialize( d ); + ChunkPtr c( new Chunk( this, d ) ); - _chunkMap[c->getMax()] = c; - _shards.insert(c->getShard()); + chunkMap[c->getMax()] = c; + shards.insert(c->getShard()); + + // set global max + if ( c->getLastmod() > _version ) + _version = c->getLastmod(); + + // set shard max + ShardChunkVersion& shardMax = shardVersions[c->getShard()]; + if ( c->getLastmod() > shardMax ) + shardMax = c->getLastmod(); } conn.done(); } - bool ChunkManager::_isValid() const { + bool ChunkManager::_isValid(const ChunkMap& chunkMap) { #define ENSURE(x) do { if(!(x)) { log() << "ChunkManager::_isValid failed: " #x << endl; return false; } } while(0) - if (_chunkMap.empty()) + if (chunkMap.empty()) return true; // Check endpoints - ENSURE(allOfType(MinKey, _chunkMap.begin()->second->getMin())); - ENSURE(allOfType(MaxKey, prior(_chunkMap.end())->second->getMax())); + ENSURE(allOfType(MinKey, chunkMap.begin()->second->getMin())); + ENSURE(allOfType(MaxKey, prior(chunkMap.end())->second->getMax())); // Make sure there are no gaps or overlaps - for (ChunkMap::const_iterator it=boost::next(_chunkMap.begin()), end=_chunkMap.end(); it != end; ++it) { + for (ChunkMap::const_iterator it=boost::next(chunkMap.begin()), end=chunkMap.end(); it != end; ++it) { ChunkMap::const_iterator last = prior(it); if (!(it->second->getMin() == last->second->getMax())) { @@ -625,14 +607,15 @@ namespace mongo { } } - bool ChunkManager::hasShardKey( const BSONObj& obj ) { + bool ChunkManager::hasShardKey( const BSONObj& obj ) const { return _key.hasShardKey( obj ); } - void ChunkManager::createFirstChunk( const Shard& shard ) { + void ChunkManager::createFirstChunk( const Shard& shard ) const { + // TODO distlock? assert( _chunkMap.size() == 0 ); - ChunkPtr c( new Chunk(this, _key.globalMin(), _key.globalMax(), shard ) ); + Chunk c (this, _key.globalMin(), _key.globalMax(), shard); // this is the first chunk; start the versioning from scratch ShardChunkVersion version; @@ -640,52 +623,42 @@ namespace mongo { // build update for the chunk collection BSONObjBuilder chunkBuilder; - c->serialize( chunkBuilder , version ); + c.serialize( chunkBuilder , version ); BSONObj chunkCmd = chunkBuilder.obj(); log() << "about to create first chunk for: " << _ns << endl; ScopedDbConnection conn( configServer.modelServer() ); BSONObj res; - conn->update( Chunk::chunkMetadataNS, QUERY( "_id" << c->genID() ), chunkCmd, true, false ); + conn->update( Chunk::chunkMetadataNS, QUERY( "_id" << c.genID() ), chunkCmd, true, false ); string errmsg = conn->getLastError(); if ( errmsg.size() ) { stringstream ss; ss << "saving first chunk failed. 
cmd: " << chunkCmd << " result: " << errmsg; log( LL_ERROR ) << ss.str() << endl; - msgasserted( 13592 , ss.str() ); // assert(13592) + msgasserted( 13592 , ss.str() ); } conn.done(); - // every instance of ChunkManager has a unique sequence number; callers of ChunkManager may - // inquiry about whether there were changes in chunk configuration (see re/load() calls) since - // the last access to ChunkManager by checking the sequence number - _sequenceNumber = ++NextSequenceNumber; - - _chunkMap[c->getMax()] = c; - _chunkRanges.reloadAll(_chunkMap); - _shards.insert(c->getShard()); - c->setLastmod(version); - // the ensure index will have the (desired) indirect effect of creating the collection on the // assigned shard, as it sets up the index over the sharding keys. - ensureIndex_inlock(); + ScopedDbConnection shardConn( c.getShard().getConnString() ); + shardConn->ensureIndex( getns() , getShardKey().key() , _unique , "" , false /* do not cache ensureIndex SERVER-1691 */ ); + shardConn.done(); - log() << "successfully created first chunk for " << c->toString() << endl; + log() << "successfully created first chunk for " << c.toString() << endl; } - ChunkPtr ChunkManager::findChunk( const BSONObj & obj) { + ChunkPtr ChunkManager::findChunk( const BSONObj & obj ) const { BSONObj key = _key.extractKey(obj); { - rwlock lk( _lock , false ); - BSONObj foo; ChunkPtr c; { - ChunkMap::iterator it = _chunkMap.upper_bound(key); + ChunkMap::const_iterator it = _chunkMap.upper_bound(key); if (it != _chunkMap.end()) { foo = it->first; c = it->second; @@ -693,25 +666,24 @@ namespace mongo { } if ( c ) { - if ( c->contains( obj ) ) + if ( c->contains( key ) ){ + dassert(c->contains(key)); // doesn't use fast-path in extractKey return c; + } PRINT(foo); PRINT(*c); PRINT(key); - grid.getDBConfig(getns())->getChunkManager(getns(), true); + reload(); massert(13141, "Chunk map pointed to incorrect chunk", false); } } - massert(8070, str::stream() << "couldn't find a chunk aftry retry which should be impossible extracted: " << key, false); - return ChunkPtr(); // unreachable + throw UserException( 8070 , str::stream() << "couldn't find a chunk which should be impossible: " << key ); } ChunkPtr ChunkManager::findChunkOnServer( const Shard& shard ) const { - rwlock lk( _lock , false ); - for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) { ChunkPtr c = i->second; if ( c->getShard() == shard ) @@ -721,14 +693,11 @@ namespace mongo { return ChunkPtr(); } - void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ) { - rwlock lk( _lock , false ); - DEV PRINT(query); - + void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const { //TODO look into FieldRangeSetOr - FieldRangeOrSet fros(_ns.c_str(), query, false); + OrRangeGenerator org(_ns.c_str(), query, false); - const string special = fros.getSpecial(); + const string special = org.getSpecial(); if (special == "2d") { BSONForEach(field, query) { if (getGtLtOp(field) == BSONObj::opNEAR) { @@ -743,25 +712,22 @@ namespace mongo { } do { - boost::scoped_ptr<FieldRangeSet> frs (fros.topFrs()); + boost::scoped_ptr<FieldRangeSetPair> frsp (org.topFrsp()); { // special case if most-significant field isn't in query - FieldRange range = frs->range(_key.key().firstElement().fieldName()); + FieldRange range = frsp->singleKeyRange(_key.key().firstElementFieldName()); if ( !range.nontrivial() ) { DEV PRINT(range.nontrivial()); - getAllShards_inlock(shards); + getAllShards(shards); return; } } - 
BoundList ranges = frs->indexBounds(_key.key(), 1); + BoundList ranges = frsp->singleKeyIndexBounds(_key.key(), 1); for (BoundList::const_iterator it=ranges.begin(), end=ranges.end(); it != end; ++it) { BSONObj minObj = it->first.replaceFieldNames(_key.key()); BSONObj maxObj = it->second.replaceFieldNames(_key.key()); - DEV PRINT(minObj); - DEV PRINT(maxObj); - ChunkRangeMap::const_iterator min, max; min = _chunkRanges.upper_bound(minObj); max = _chunkRanges.upper_bound(maxObj); @@ -781,14 +747,14 @@ namespace mongo { //return; } - if (fros.moreOrClauses()) - fros.popOrClause(); + if (org.moreOrClauses()) + org.popOrClauseSingleKey(); } - while (fros.moreOrClauses()); + while (org.moreOrClauses()); } - void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) { + void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) const { uassert(13405, "min must have shard key", hasShardKey(min)); uassert(13406, "max must have shard key", hasShardKey(max)); @@ -804,37 +770,30 @@ namespace mongo { } } - void ChunkManager::getAllShards( set<Shard>& all ) { - rwlock lk( _lock , false ); - getAllShards_inlock( all ); - } - - void ChunkManager::getAllShards_inlock( set<Shard>& all ){ + void ChunkManager::getAllShards( set<Shard>& all ) const { all.insert(_shards.begin(), _shards.end()); } - void ChunkManager::ensureIndex_inlock() { - //TODO in parallel? - for ( set<Shard>::const_iterator i=_shards.begin(); i!=_shards.end(); ++i ) { - ScopedDbConnection conn( i->getConnString() ); - conn->ensureIndex( getns() , getShardKey().key() , _unique , "" , false /* do not cache ensureIndex SERVER-1691 */ ); - conn.done(); - } - } - - void ChunkManager::drop( ChunkManagerPtr me ) { - rwlock lk( _lock , true ); + void ChunkManager::drop( ChunkManagerPtr me ) const { + scoped_lock lk( _mutex ); configServer.logChange( "dropCollection.start" , _ns , BSONObj() ); - dist_lock_try dlk( &_nsLock , "drop" ); + dist_lock_try dlk; + try{ + dlk = dist_lock_try( &_nsLock , "drop" ); + } + catch( LockException& e ){ + uassert( 14022, str::stream() << "Error locking distributed lock for chunk drop." << causedBy( e ), false); + } + uassert( 13331 , "collection's metadata is undergoing changes. Please try again." 
, dlk.got() ); uassert( 10174 , "config servers not all up" , configServer.allUp() ); set<Shard> seen; - log(1) << "ChunkManager::drop : " << _ns << endl; + LOG(1) << "ChunkManager::drop : " << _ns << endl; // lock all shards so no one can do a split/migrate for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) { @@ -842,12 +801,7 @@ namespace mongo { seen.insert( c->getShard() ); } - log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl; - - // wipe my meta-data - _chunkMap.clear(); - _chunkRanges.clear(); - _shards.clear(); + LOG(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl; // delete data from mongod for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) { @@ -856,82 +810,64 @@ namespace mongo { conn.done(); } - log(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl; + LOG(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl; // remove chunk data ScopedDbConnection conn( configServer.modelServer() ); conn->remove( Chunk::chunkMetadataNS , BSON( "ns" << _ns ) ); conn.done(); - log(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl; + LOG(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl; for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) { ScopedDbConnection conn( *i ); BSONObj res; + + // this is horrible + // we need a special command for dropping on the d side + // this hack works for the moment + if ( ! setShardVersion( conn.conn() , _ns , 0 , true , res ) ) throw UserException( 8071 , str::stream() << "cleaning up after drop failed: " << res ); + conn->simpleCommand( "admin", 0, "unsetSharding" ); conn.done(); } - log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl; + LOG(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl; configServer.logChange( "dropCollection" , _ns , BSONObj() ); } - bool ChunkManager::maybeChunkCollection() { - ensureIndex_inlock(); - + void ChunkManager::maybeChunkCollection() const { uassert( 13346 , "can't pre-split already splitted collection" , (_chunkMap.size() == 1) ); - + ChunkPtr soleChunk = _chunkMap.begin()->second; vector<BSONObj> splitPoints; soleChunk->pickSplitVector( splitPoints , Chunk::MaxChunkSize ); if ( splitPoints.empty() ) { - log(1) << "not enough data to warrant chunking " << getns() << endl; - return false; + LOG(1) << "not enough data to warrant chunking " << getns() << endl; + return; } - + BSONObj res; - bool worked = soleChunk->multiSplit( splitPoints , res , false ); + ChunkPtr p; + bool worked = soleChunk->multiSplit( splitPoints , res ); if (!worked) { log( LL_WARNING ) << "could not split '" << getns() << "': " << res << endl; - return false; + return; } - return true; } ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const { - rwlock lk( _lock , false ); - // TODO: cache or something? 
- - ShardChunkVersion max = 0; - - for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) { - ChunkPtr c = i->second; - DEV assert( c ); - if ( c->getShard() != shard ) - continue; - if ( c->getLastmod() > max ) - max = c->getLastmod(); - } - return max; + ShardVersionMap::const_iterator i = _shardVersions.find( shard ); + if ( i == _shardVersions.end() ) + return 0; + return i->second; } ShardChunkVersion ChunkManager::getVersion() const { - rwlock lk( _lock , false ); - - ShardChunkVersion max = 0; - - for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) { - ChunkPtr c = i->second; - if ( c->getLastmod() > max ) - max = c->getLastmod(); - } - - return max; + return _version; } string ChunkManager::toString() const { - rwlock lk( _lock , false ); - stringstream ss; ss << "ChunkManager: " << _ns << " key:" << _key.toString() << '\n'; for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) { @@ -993,69 +929,6 @@ namespace mongo { } } - void ChunkRangeManager::reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max) { - if (_ranges.empty()) { - reloadAll(chunks); - return; - } - - ChunkRangeMap::iterator low = _ranges.upper_bound(min); - ChunkRangeMap::iterator high = _ranges.lower_bound(max); - - assert(low != _ranges.end()); - assert(high != _ranges.end()); - assert(low->second); - assert(high->second); - - ChunkMap::const_iterator begin = chunks.upper_bound(low->second->getMin()); - ChunkMap::const_iterator end = chunks.lower_bound(high->second->getMax()); - - assert(begin != chunks.end()); - assert(end != chunks.end()); - - // C++ end iterators are one-past-last - ++high; - ++end; - - // update ranges - _ranges.erase(low, high); // invalidates low - _insertRange(begin, end); - - assert(!_ranges.empty()); - DEV assertValid(); - - // merge low-end if possible - low = _ranges.upper_bound(min); - assert(low != _ranges.end()); - if (low != _ranges.begin()) { - shared_ptr<ChunkRange> a = prior(low)->second; - shared_ptr<ChunkRange> b = low->second; - if (a->getShard() == b->getShard()) { - shared_ptr<ChunkRange> cr (new ChunkRange(*a, *b)); - _ranges.erase(prior(low)); - _ranges.erase(low); // invalidates low - _ranges[cr->getMax()] = cr; - } - } - - DEV assertValid(); - - // merge high-end if possible - high = _ranges.lower_bound(max); - if (high != prior(_ranges.end())) { - shared_ptr<ChunkRange> a = high->second; - shared_ptr<ChunkRange> b = boost::next(high)->second; - if (a->getShard() == b->getShard()) { - shared_ptr<ChunkRange> cr (new ChunkRange(*a, *b)); - _ranges.erase(boost::next(high)); - _ranges.erase(high); //invalidates high - _ranges[cr->getMax()] = cr; - } - } - - DEV assertValid(); - } - void ChunkRangeManager::reloadAll(const ChunkMap& chunks) { _ranges.clear(); _insertRange(chunks.begin(), chunks.end()); @@ -1095,13 +968,6 @@ namespace mongo { class ChunkObjUnitTest : public UnitTest { public: - void runShard() { - ChunkPtr c; - assert( ! 
c ); - c.reset( new Chunk( 0 ) ); - assert( c ); - } - void runShardChunkVersion() { vector<ShardChunkVersion> all; all.push_back( ShardChunkVersion(1,1) ); @@ -1118,9 +984,8 @@ namespace mongo { } void run() { - runShard(); runShardChunkVersion(); - log(1) << "shardObjTest passed" << endl; + LOG(1) << "shardObjTest passed" << endl; } } shardObjTest; @@ -1145,7 +1010,7 @@ namespace mongo { cmdBuilder.append( "shardHost" , s.getConnString() ); BSONObj cmd = cmdBuilder.obj(); - log(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns << " " << cmd << " " << &conn << endl; + LOG(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns << " " << cmd << " " << &conn << endl; return conn.runCommand( "admin" , cmd , result ); } @@ -37,13 +37,13 @@ namespace mongo { class ChunkRangeMangager; class ChunkObjUnitTest; - typedef shared_ptr<Chunk> ChunkPtr; + typedef shared_ptr<const Chunk> ChunkPtr; // key is max for each Chunk or ChunkRange typedef map<BSONObj,ChunkPtr,BSONObjCmp> ChunkMap; typedef map<BSONObj,shared_ptr<ChunkRange>,BSONObjCmp> ChunkRangeMap; - typedef shared_ptr<ChunkManager> ChunkManagerPtr; + typedef shared_ptr<const ChunkManager> ChunkManagerPtr; /** config.chunks @@ -52,17 +52,16 @@ namespace mongo { x is in a shard iff min <= x < max */ - class Chunk : boost::noncopyable, public boost::enable_shared_from_this<Chunk> { + class Chunk : boost::noncopyable { public: - Chunk( ChunkManager * info ); - Chunk( ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard); + Chunk( const ChunkManager * info , BSONObj from); + Chunk( const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard); // // serialization support // void serialize(BSONObjBuilder& to, ShardChunkVersion myLastMod=0); - void unserialize(const BSONObj& from); // // chunk boundary support @@ -70,8 +69,6 @@ namespace mongo { const BSONObj& getMin() const { return _min; } const BSONObj& getMax() const { return _max; } - void setMin(const BSONObj& o) { _min = o; } - void setMax(const BSONObj& o) { _max = o; } // if min/max key is pos/neg infinity bool minIsInf() const; @@ -86,7 +83,7 @@ namespace mongo { // chunk version support // - void appendShortVersion( const char * name , BSONObjBuilder& b ); + void appendShortVersion( const char * name , BSONObjBuilder& b ) const; ShardChunkVersion getLastmod() const { return _lastmod; } void setLastmod( ShardChunkVersion v ) { _lastmod = v; } @@ -100,7 +97,7 @@ namespace mongo { * then we check the real size, and if its too big, we split * @return if something was split */ - bool splitIfShould( long dataWritten ); + bool splitIfShould( long dataWritten ) const; /** * Splits this chunk at a non-specificed split key to be chosen by the mongod holding this chunk. 
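The const typedefs above are the heart of the chunk.h changes: once constructed, a Chunk or ChunkManager is never mutated, so any thread can hold a snapshot through a shared_ptr<const T> without locking, and a reload simply builds and publishes a replacement instance. A minimal sketch of that copy-on-reload pattern, using illustrative names (Snapshot and Registry are stand-ins, not classes from this tree):

    #include <boost/shared_ptr.hpp>
    #include <boost/thread/mutex.hpp>

    // stand-in for ChunkManager: all state is fixed at construction
    class Snapshot {
    public:
        explicit Snapshot( int version ) : _version( version ) {}
        int version() const { return _version; }
    private:
        const int _version; // const members are safe to read from any thread
    };

    typedef boost::shared_ptr<const Snapshot> SnapshotPtr;

    class Registry {
    public:
        Registry() : _next( 0 ) { reload(); }

        // readers keep their SnapshotPtr valid even across a concurrent reload
        SnapshotPtr get() const {
            boost::mutex::scoped_lock lk( _mutex );
            return _current;
        }

        // a reload never touches the old snapshot; it swaps in a fresh one
        void reload() {
            boost::mutex::scoped_lock lk( _mutex );
            _current = SnapshotPtr( new Snapshot( _next++ ) );
        }

    private:
        mutable boost::mutex _mutex;
        SnapshotPtr _current;
        int _next;
    };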
@@ -108,18 +105,18 @@ namespace mongo { * @param force if set to true, will split the chunk regardless if the split is really necessary size wise * if set to false, will only split if the chunk has reached the currently desired maximum size * @param res the object containing details about the split execution - * @return if found a key and split successfully + * @return splitPoint if found a key and split successfully, else empty BSONObj */ - bool singleSplit( bool force , BSONObj& res , ChunkPtr* low=NULL, ChunkPtr* high=NULL); + BSONObj singleSplit( bool force , BSONObj& res ) const; /** * Splits this chunk at the given key (or keys) * * @param splitPoints the vector of keys that should be used to divide this chunk * @param res the object containing details about the split execution - * @return if split was successful + * @return if the split was successful */ - bool multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res , bool resetIfSplit ); + bool multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res ) const; /** * Asks the mongod holding this chunk to find a key that approximately divides this chunk in two @@ -141,13 +138,6 @@ namespace mongo { // /** - * moves either this shard or newShard if it makes sense too - * - * @return whether or not a shard was moved - */ - bool moveIfShould( ChunkPtr newShard = ChunkPtr() ); - - /** * Issues a migrate request for this chunk * * @param to shard to move this chunk to @@ -155,7 +145,7 @@ namespace mongo { * @param res the object containing details about the migrate execution * @return true if move was successful */ - bool moveAndCommit( const Shard& to , long long chunkSize , BSONObj& res ); + bool moveAndCommit( const Shard& to , long long chunkSize , BSONObj& res ) const; /** * @return size of shard in bytes @@ -164,11 +154,6 @@ namespace mongo { long getPhysicalSize() const; // - // chunk size support - - int countObjects(int maxcount=0) const; - - // // public constants // @@ -192,9 +177,10 @@ namespace mongo { private: + // main shard info - ChunkManager * _manager; + const ChunkManager * _manager; BSONObj _min; BSONObj _max; @@ -203,7 +189,7 @@ namespace mongo { // transient stuff - long _dataWritten; + mutable long _dataWritten; // methods, etc.. 
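Per the revised contract above, singleSplit now reports its outcome entirely through the returned BSONObj: an empty object means no split happened, a non-empty one is the split point that was used. A short usage sketch written against these declarations ('chunk' stands for whatever ChunkPtr the caller already holds):

    // assumes a ChunkPtr 'chunk', per the header above
    BSONObj res;
    BSONObj splitPoint = chunk->singleSplit( false /* don't force: split only if big enough */ , res );
    if ( splitPoint.isEmpty() ) {
        // no split point found or not enough data; the chunk is unchanged
    }
    else {
        // the chunk was split at splitPoint; res carries the command details
        log() << "split " << chunk->toString() << " at " << splitPoint << endl;
    }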
@@ -215,7 +201,7 @@ namespace mongo { BSONObj _getExtremeKey( int sort ) const; /** initializes _dataWritten with a random value so that a mongos restart wouldn't cause delay in splitting */ - void _setDataWritten(); + static long mkDataWritten(); ShardKeyPattern skey() const; }; @@ -275,7 +261,6 @@ namespace mongo { void clear() { _ranges.clear(); } void reloadAll(const ChunkMap& chunks); - void reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max); // Slow operation -- wrap with DEV void assertValid() const; @@ -298,27 +283,27 @@ namespace mongo { */ class ChunkManager { public: + typedef map<Shard,ShardChunkVersion> ShardVersionMap; ChunkManager( string ns , ShardKeyPattern pattern , bool unique ); - virtual ~ChunkManager(); string getns() const { return _ns; } - int numChunks() const { rwlock lk( _lock , false ); return _chunkMap.size(); } - bool hasShardKey( const BSONObj& obj ); + int numChunks() const { return _chunkMap.size(); } + bool hasShardKey( const BSONObj& obj ) const; - void createFirstChunk( const Shard& shard ); - ChunkPtr findChunk( const BSONObj& obj ); + void createFirstChunk( const Shard& shard ) const; // only call from DBConfig::shardCollection + ChunkPtr findChunk( const BSONObj& obj ) const; ChunkPtr findChunkOnServer( const Shard& shard ) const; const ShardKeyPattern& getShardKey() const { return _key; } bool isUnique() const { return _unique; } - bool maybeChunkCollection(); + void maybeChunkCollection() const; - void getShardsForQuery( set<Shard>& shards , const BSONObj& query ); - void getAllShards( set<Shard>& all ); - void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max); // [min, max) + void getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const; + void getAllShards( set<Shard>& all ) const; + void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) const; // [min, max) string toString() const; @@ -330,7 +315,7 @@ namespace mongo { */ unsigned long long getSequenceNumber() const { return _sequenceNumber; } - void getInfo( BSONObjBuilder& b ) { + void getInfo( BSONObjBuilder& b ) const { b.append( "key" , _key.key() ); b.appendBool( "unique" , _unique ); } @@ -338,39 +323,41 @@ namespace mongo { /** * @param me - so i don't get deleted before i'm done */ - void drop( ChunkManagerPtr me ); + void drop( ChunkManagerPtr me ) const; void _printChunks() const; int getCurrentDesiredChunkSize() const; private: - void _reload(); - void _reload_inlock(); - void _load(); + ChunkManagerPtr reload(bool force=true) const; // doesn't modify self! 
+ + // helpers for constructor + void _load(ChunkMap& chunks, set<Shard>& shards, ShardVersionMap& shardVersions); + static bool _isValid(const ChunkMap& chunks); - void ensureIndex_inlock(); - void getAllShards_inlock( set<Shard>& all ); + // All members should be const for thread-safety + const string _ns; + const ShardKeyPattern _key; + const bool _unique; - string _ns; - ShardKeyPattern _key; - bool _unique; + const ChunkMap _chunkMap; + const ChunkRangeManager _chunkRanges; - ChunkMap _chunkMap; - ChunkRangeManager _chunkRanges; + const set<Shard> _shards; - set<Shard> _shards; + const ShardVersionMap _shardVersions; // max version per shard - unsigned long long _sequenceNumber; + ShardChunkVersion _version; // max version of any chunk - mutable RWLock _lock; - DistributedLock _nsLock; + mutable mutex _mutex; // only used with _nsLock + mutable DistributedLock _nsLock; + + const unsigned long long _sequenceNumber; friend class Chunk; friend class ChunkRangeManager; // only needed for CRM::assertValid() static AtomicUInt NextSequenceNumber; - - bool _isValid() const; }; // like BSONObjCmp. for use as an STL comparison functor diff --git a/s/client.cpp b/s/client.cpp index c053289..0da05b6 100644 --- a/s/client.cpp +++ b/s/client.cpp @@ -55,7 +55,7 @@ namespace mongo { if ( p ) { HostAndPort r = p->remote(); - if ( _remote.port() == -1 ) + if ( ! _remote.hasPort() ) _remote = r; else if ( _remote != r ) { stringstream ss; @@ -96,7 +96,7 @@ namespace mongo { BSONElement cid = gle["connectionId"]; if ( cid.eoo() ) { - error() << "getLastError writeback can't work because of version mis-match" << endl; + error() << "getLastError writeback can't work because of version mismatch" << endl; return; } @@ -114,7 +114,7 @@ namespace mongo { return res; if ( fromWriteBackListener ) { - LOG(1) << "not doing recusrive writeback" << endl; + LOG(1) << "not doing recursive writeback" << endl; return res; } @@ -150,7 +150,7 @@ namespace mongo { } catch( std::exception &e ){ - warning() << "Could not get last error." << e.what() << endl; + warning() << "could not get last error." << causedBy( e ) << endl; // Catch everything that happens here, since we need to ensure we return our connection when we're // finished. @@ -223,7 +223,7 @@ namespace mongo { // Safe to return here, since we haven't started any extra processing yet, just collecting // responses. - warning() << "Could not get last error." << e.what() << endl; + warning() << "could not get last error." 
<< causedBy( e ) << endl; conn.done(); return false; @@ -18,6 +18,7 @@ #include "../pch.h" #include "writeback_listener.h" +#include "../db/security.h" namespace mongo { @@ -82,9 +83,10 @@ namespace mongo { void noAutoSplit() { _autoSplitOk = false; } static ClientInfo * get(); - + AuthenticationInfo* getAuthenticationInfo() const { return (AuthenticationInfo*)&_ai; } + bool isAdmin() { return _ai.isAuthorized( "admin" ); } private: - + AuthenticationInfo _ai; struct WBInfo { WBInfo( const WriteBackListener::ConnectionIdent& c , OID o ) : ident( c ) , id( o ) {} WriteBackListener::ConnectionIdent ident; diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp index 7677265..4568c4d 100644 --- a/s/commands_admin.cpp +++ b/s/commands_admin.cpp @@ -26,7 +26,8 @@ */ #include "pch.h" -#include "../util/message.h" +#include "../util/net/message.h" +#include "../util/net/listen.h" #include "../util/processinfo.h" #include "../util/stringutils.h" #include "../util/version.h" @@ -44,6 +45,7 @@ #include "stats.h" #include "writeback_listener.h" #include "client.h" +#include "../util/ramlog.h" namespace mongo { @@ -62,6 +64,15 @@ namespace mongo { // all grid commands are designed not to lock virtual LockType locktype() const { return NONE; } + + bool okForConfigChanges( string& errmsg ) { + string e; + if ( ! configServer.allUp(e) ) { + errmsg = str::stream() << "not all config servers are up: " << e; + return false; + } + return true; + } }; // --------------- misc commands ---------------------- @@ -72,7 +83,7 @@ namespace mongo { virtual void help( stringstream& help ) const { help << " shows status/reachability of servers in the cluster"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result.append("configserver", configServer.getPrimary().getConnString() ); result.append("isdbgrid", 1); return true; @@ -85,7 +96,7 @@ namespace mongo { virtual void help( stringstream& help ) const { help << "flush all router config"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { grid.flushConfig(); result.appendBool( "flushed" , true ); return true; @@ -102,7 +113,7 @@ namespace mongo { virtual bool slaveOk() const { return true; } virtual LockType locktype() const { return NONE; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { result.append( "host" , prettyHostName() ); result.append("version", versionString); result.append("process","mongos"); @@ -167,6 +178,20 @@ namespace mongo { bb.done(); } + { + RamLog* rl = RamLog::get( "warnings" ); + verify(15879, rl); + + if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes + vector<const char*> lines; + rl->get( lines ); + + BSONArrayBuilder arr( result.subarrayStart( "warnings" ) ); + for ( unsigned i=std::max(0,(int)lines.size()-10); i<lines.size(); i++ ) + arr.append( lines[i] ); + arr.done(); + } + } return 1; } @@ -177,7 +202,7 @@ namespace mongo { class FsyncCommand : public GridAdminCmd { public: FsyncCommand() : GridAdminCmd( "fsync" ) {} - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& 
errmsg, BSONObjBuilder& result, bool) { if ( cmdObj["lock"].trueValue() ) { errmsg = "can't do lock through mongos"; return false; @@ -217,9 +242,8 @@ namespace mongo { MoveDatabasePrimaryCommand() : GridAdminCmd("movePrimary") { } virtual void help( stringstream& help ) const { help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }"; - // TODO: locking? } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string dbname = cmdObj.firstElement().valuestrsafe(); if ( dbname.size() == 0 ) { @@ -246,7 +270,7 @@ namespace mongo { Shard s = Shard::make( to ); if ( config->getPrimary() == s.getConnString() ) { - errmsg = "thats already the primary"; + errmsg = "it is already the primary"; return false; } @@ -255,10 +279,27 @@ namespace mongo { return false; } - log() << "movePrimary: moving " << dbname << " primary from: " << config->getPrimary().toString() + log() << "Moving " << dbname << " primary from: " << config->getPrimary().toString() << " to: " << s.toString() << endl; - // TODO LOCKING: this is not safe with multiple mongos + // Locking enabled now... + DistributedLock lockSetup( configServer.getConnectionString(), dbname + "-movePrimary" ); + dist_lock_try dlk; + + // Distributed locking added. + try{ + dlk = dist_lock_try( &lockSetup , string("Moving primary shard of ") + dbname ); + } + catch( LockException& e ){ + errmsg = str::stream() << "error locking distributed lock to move primary shard of " << dbname << causedBy( e ); + warning() << errmsg << endl; + return false; + } + + if ( ! dlk.got() ) { + errmsg = (string)"metadata lock is already taken for moving " + dbname; + return false; + } ScopedDbConnection toconn( s.getConnString() ); @@ -297,19 +338,31 @@ namespace mongo { << "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n" << " { enablesharding : \"<dbname>\" }\n"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string dbname = cmdObj.firstElement().valuestrsafe(); if ( dbname.size() == 0 ) { errmsg = "no db"; return false; } + + if ( dbname == "admin" ) { + errmsg = "can't shard the admin db"; + return false; + } + if ( dbname == "local" ) { + errmsg = "can't shard the local db"; + return false; + } DBConfigPtr config = grid.getDBConfig( dbname ); if ( config->isShardingEnabled() ) { errmsg = "already enabled"; return false; } - + + if ( ! okForConfigChanges( errmsg ) ) + return false; + log() << "enabling sharding on: " << dbname << endl; config->enableSharding(); @@ -330,7 +383,7 @@ namespace mongo { << " { enablesharding : \"<dbname>\" }\n"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { errmsg = "no ns"; @@ -366,9 +419,12 @@ namespace mongo { return false; } + if ( ! okForConfigChanges( errmsg ) ) + return false; + // Sharding interacts with indexing in at least two ways: // - // 1. A unique index must have the sharding key as its prefix. Otherwise maintainig uniqueness would + // 1. A unique index must have the sharding key as its prefix. Otherwise maintaining uniqueness would // require coordinated access to all shards. 
Trying to shard a collection with such an index is not // allowed. // @@ -380,25 +436,38 @@ namespace mongo { // // We enforce both these conditions in what comes next. + bool careAboutUnique = cmdObj["unique"].trueValue(); + { ShardKeyPattern proposedKey( key ); bool hasShardIndex = false; + bool hasUniqueShardIndex = false; ScopedDbConnection conn( config->getPrimary() ); BSONObjBuilder b; b.append( "ns" , ns ); + BSONArrayBuilder allIndexes; + auto_ptr<DBClientCursor> cursor = conn->query( config->getName() + ".system.indexes" , b.obj() ); while ( cursor->more() ) { BSONObj idx = cursor->next(); + allIndexes.append( idx ); + + bool idIndex = ! idx["name"].eoo() && idx["name"].String() == "_id_"; + bool uniqueIndex = ( ! idx["unique"].eoo() && idx["unique"].trueValue() ) || + idIndex; + // Is index key over the sharding key? Remember that. if ( key.woCompare( idx["key"].embeddedObjectUserCheck() ) == 0 ) { hasShardIndex = true; + hasUniqueShardIndex = uniqueIndex; + continue; } // Not a unique index? Move on. - if ( idx["unique"].eoo() || ! idx["unique"].trueValue() ) + if ( ! uniqueIndex || idIndex ) continue; // Shard key is prefix of unique index? Move on. @@ -409,6 +478,12 @@ namespace mongo { conn.done(); return false; } + + if( careAboutUnique && hasShardIndex && ! hasUniqueShardIndex ){ + errmsg = (string)"can't shard collection " + ns + ", index not unique"; + conn.done(); + return false; + } BSONObj res = conn->findOne( config->getName() + ".system.namespaces" , BSON( "name" << ns ) ); if ( res["options"].type() == Object && res["options"].embeddedObject()["capped"].trueValue() ) { @@ -432,6 +507,8 @@ namespace mongo { if ( ! hasShardIndex && ( conn->count( ns ) != 0 ) ) { errmsg = "please create an index over the sharding key before sharding."; + result.append( "proposedKey" , key ); + result.appendArray( "curIndexes" , allIndexes.done() ); conn.done(); return false; } @@ -441,7 +518,7 @@ namespace mongo { tlog() << "CMD: shardcollection: " << cmdObj << endl; - config->shardCollection( ns , key , cmdObj["unique"].trueValue() ); + config->shardCollection( ns , key , careAboutUnique ); result << "collectionsharded" << ns; return true; @@ -455,10 +532,10 @@ namespace mongo { help << " example: { getShardVersion : 'alleyinsider.foo' } "; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj.firstElement().valuestrsafe(); if ( ns.size() == 0 ) { - errmsg = "need to speciy fully namespace"; + errmsg = "need to specify fully namespace"; return false; } @@ -468,7 +545,7 @@ namespace mongo { return false; } - ChunkManagerPtr cm = config->getChunkManager( ns ); + ChunkManagerPtr cm = config->getChunkManagerIfExists( ns ); if ( ! cm ) { errmsg = "no chunk manager?"; return false; @@ -489,11 +566,15 @@ namespace mongo { << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n" << " example: - split the shard that contains the key with this as the middle \n" << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n" - << " NOTE: this does not move move the chunks, it merely creates a logical seperation \n" + << " NOTE: this does not move move the chunks, it merely creates a logical separation \n" ; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { + + if ( ! 
okForConfigChanges( errmsg ) ) + return false; + ShardConnection::sync(); string ns = cmdObj.firstElement().valuestrsafe(); @@ -504,8 +585,11 @@ namespace mongo { DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isSharded( ns ) ) { - errmsg = "ns not sharded. have to shard before can split"; - return false; + config->reload(); + if ( ! config->isSharded( ns ) ) { + errmsg = "ns not sharded. have to shard before can split"; + return false; + } } BSONObj find = cmdObj.getObjectField( "find" ); @@ -528,8 +612,8 @@ namespace mongo { BSONObj res; bool worked; if ( middle.isEmpty() ) { - worked = chunk->singleSplit( true /* force a split even if not enough data */ , res ); - + BSONObj ret = chunk->singleSplit( true /* force a split even if not enough data */ , res ); + worked = !ret.isEmpty(); } else { // sanity check if the key provided is a valid split point @@ -538,9 +622,14 @@ namespace mongo { return false; } + if (!fieldsMatch(middle, info->getShardKey().key())){ + errmsg = "middle has different fields (or different order) than shard key"; + return false; + } + vector<BSONObj> splitPoints; splitPoints.push_back( middle ); - worked = chunk->multiSplit( splitPoints , res , true ); + worked = chunk->multiSplit( splitPoints , res ); } if ( !worked ) { @@ -559,7 +648,11 @@ namespace mongo { virtual void help( stringstream& help ) const { help << "{ movechunk : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { + + if ( ! okForConfigChanges( errmsg ) ) + return false; + ShardConnection::sync(); Timer t; @@ -571,8 +664,11 @@ namespace mongo { DBConfigPtr config = grid.getDBConfig( ns ); if ( ! config->isSharded( ns ) ) { - errmsg = "ns not sharded. have to shard before can move a chunk"; - return false; + config->reload(); + if ( ! config->isSharded( ns ) ) { + errmsg = "ns not sharded. 
have to shard before we can move a chunk"; + return false; + } } BSONObj find = cmdObj.getObjectField( "find" ); @@ -613,7 +709,7 @@ namespace mongo { return false; } - // pre-emptively reload the config to get new version info + // preemptively reload the config to get new version info config->getChunkManager( ns , true ); result.append( "millis" , t.millis() ); @@ -629,7 +725,7 @@ namespace mongo { virtual void help( stringstream& help ) const { help << "list all shards of the system"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { ScopedDbConnection conn( configServer.getPrimary() ); vector<BSONObj> all; @@ -653,7 +749,7 @@ namespace mongo { virtual void help( stringstream& help ) const { help << "add a new shard to the system"; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { errmsg.clear(); // get replica set component hosts @@ -663,12 +759,15 @@ namespace mongo { return false; } - // using localhost in server names implies every other process must use locahost addresses too + // using localhost in server names implies every other process must use localhost addresses too vector<HostAndPort> serverAddrs = servers.getServers(); for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ) { if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ) { - errmsg = "can't use localhost as a shard since all shards need to communicate. " - "either use all shards and configdbs in localhost or all in actual IPs " ; + errmsg = str::stream() << + "can't use localhost as a shard since all shards need to communicate. " << + "either use all shards and configdbs in localhost or all in actual IPs " << + " host: " << serverAddrs[i].toString() << " isLocalHost:" << serverAddrs[i].isLocalHost(); + log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl; return false; } @@ -711,7 +810,7 @@ namespace mongo { virtual void help( stringstream& help ) const { help << "remove a shard from the system."; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string target = cmdObj.firstElement().valuestrsafe(); Shard s = Shard::make( target ); if ( !
grid.knowAboutShard( s.getConnString() ) ) { @@ -794,11 +893,12 @@ namespace mongo { class IsDbGridCmd : public Command { public: virtual LockType locktype() const { return NONE; } + virtual bool requiresAuth() { return false; } virtual bool slaveOk() const { return true; } IsDbGridCmd() : Command("isdbgrid") { } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result.append("isdbgrid", 1); result.append("hostname", getHostNameCached()); return true; @@ -816,7 +916,7 @@ namespace mongo { help << "test if this is master half of a replica pair"; } CmdIsMaster() : Command("isMaster" , false , "ismaster") { } - virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result.appendBool("ismaster", true ); result.append("msg", "isdbgrid"); result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize); @@ -840,7 +940,7 @@ namespace mongo { virtual void help( stringstream &help ) const { help << "{whatsmyuri:1}"; } - virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { result << "you" << ClientInfo::get()->getRemote(); return true; } @@ -858,7 +958,7 @@ namespace mongo { help << "get previous error (since last reseterror command)"; } CmdShardingGetPrevError() : Command( "getPrevError" , false , "getpreverror") { } - virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { errmsg += "getpreverror not supported for sharded environments"; return false; } @@ -876,7 +976,7 @@ namespace mongo { } CmdShardingGetLastError() : Command("getLastError" , false , "getlasterror") { } - virtual bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { LastError *le = lastError.disableForCommand(); { assert( le ); @@ -903,7 +1003,7 @@ namespace mongo { return true; } - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { + bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { LastError *le = lastError.get(); if ( le ) le->reset(); @@ -934,7 +1034,7 @@ namespace mongo { virtual LockType locktype() const { return NONE; } virtual void help( stringstream& help ) const { help << "list databases on cluster"; } - bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { + bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { vector<Shard> shards; Shard::getAllShards( shards ); @@ -976,7 +1076,7 @@ namespace mongo { if ( name == "local" ) { // we don't return local - // since all shards have their own independant local + // since all shards have their own independent local continue; } @@ -1031,7 +1131,7 @@ namespace mongo { virtual LockType locktype() const { return NONE; } virtual void help( stringstream& help ) const { help << "Not supported sharded"; } - bool run(const string& , BSONObj& 
jsobj, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) { + bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) { errmsg = "closeAllDatabases isn't supported through mongos"; return false; } @@ -1047,13 +1147,25 @@ namespace mongo { virtual LockType locktype() const { return NONE; } virtual void help( stringstream& help ) const { help << "Not supported through mongos"; } - bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) { + bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { if ( jsobj["forShell"].trueValue() ) lastError.disableForCommand(); errmsg = "replSetGetStatus is not supported through mongos"; + result.append("info", "mongos"); // see sayReplSetMemberState return false; } } cmdReplSetGetStatus; + CmdShutdown cmdShutdown; + + void CmdShutdown::help( stringstream& help ) const { + help << "shutdown the database. must be run against the admin db and " + << "either (1) run from localhost or (2) authenticated."; + } + + bool CmdShutdown::run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { + return shutdownHelper(); + } + } // namespace mongo diff --git a/s/commands_public.cpp b/s/commands_public.cpp index f29205b..ef7110c 100644 --- a/s/commands_public.cpp +++ b/s/commands_public.cpp @@ -18,20 +18,28 @@ */ #include "pch.h" -#include "../util/message.h" +#include "../util/net/message.h" #include "../db/dbmessage.h" #include "../client/connpool.h" #include "../client/parallel.h" #include "../db/commands.h" -#include "../db/query.h" +#include "../db/queryutil.h" +#include "../scripting/engine.h" #include "config.h" #include "chunk.h" #include "strategy.h" #include "grid.h" +#include "mr_shard.h" +#include "client.h" namespace mongo { + bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) + { + return true; + } + namespace dbgrid_pub_cmds { class PublicGridCommand : public Command { @@ -45,22 +53,38 @@ namespace mongo { return false; } + // Override if passthrough should also send query options + // Safer as off by default, can slowly enable as we add more tests + virtual bool passOptions() const { return false; } + // all grid commands are designed not to lock virtual LockType locktype() const { return NONE; } protected: + bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) { - return _passthrough(conf->getName(), conf, cmdObj, result); + return _passthrough(conf->getName(), conf, cmdObj, 0, result); } bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) { - return _passthrough("admin", conf, cmdObj, result); + return _passthrough("admin", conf, cmdObj, 0, result); + } + + bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) { + return _passthrough(conf->getName(), conf, cmdObj, options, result); + } + bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) { + return _passthrough("admin", conf, cmdObj, options, result); } private: - bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) { + bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , int options , BSONObjBuilder& result ) { ShardConnection conn( conf->getPrimary() , "" ); BSONObj res; - bool ok =
conn->runCommand( db , cmdObj , res ); + bool ok = conn->runCommand( db , cmdObj , res , passOptions() ? options : 0 ); + if ( ! ok && res["code"].numberInt() == StaleConfigInContextCode ) { + conn.done(); + throw StaleConfigException("foo","command failed because of stale config"); + } result.appendElements( res ); conn.done(); return ok; @@ -87,13 +111,14 @@ namespace mongo { virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {} // don't override - virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& output, bool) { + virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& output, bool) { + LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << cmdObj << endl; set<Shard> shards; getShards(dbName, cmdObj, shards); list< shared_ptr<Future::CommandResult> > futures; for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) { - futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj ) ); + futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, 0 ) ); } vector<BSONObj> results; @@ -147,13 +172,13 @@ namespace mongo { virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) = 0; - virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { string fullns = getFullNS( dbName , cmdObj ); DBConfigPtr conf = grid.getDBConfig( dbName , false ); if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) { - return passthrough( conf , cmdObj , result ); + return passthrough( conf , cmdObj , options, result ); } errmsg = "can't do command: " + name + " on sharded collection"; return false; @@ -172,9 +197,41 @@ namespace mongo { ReIndexCmd() : AllShardsCollectionCommand("reIndex") {} } reIndexCmd; + class ProfileCmd : public PublicGridCommand { + public: + ProfileCmd() : PublicGridCommand("profile") {} + virtual bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { + errmsg = "profile currently not supported via mongos"; + return false; + } + } profileCmd; + + class ValidateCmd : public AllShardsCollectionCommand { public: ValidateCmd() : AllShardsCollectionCommand("validate") {} + virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) { + for (vector<BSONObj>::const_iterator it(results.begin()), end(results.end()); it!=end; it++){ + const BSONObj& result = *it; + const BSONElement valid = result["valid"]; + if (!valid.eoo()){ + if (!valid.trueValue()) { + output.appendBool("valid", false); + return; + } + } + else { + // Support pre-1.9.0 output with everything in a big string + const char* s = result["result"].valuestrsafe(); + if (strstr(s, "exception") || strstr(s, "corrupt")){ + output.appendBool("valid", false); + return; + } + } + } + + output.appendBool("valid", true); + } } validateCmd; class RepairDatabaseCmd : public RunOnAllShardsCommand { @@ -221,7 +278,7 @@ namespace mongo { class DropCmd : public PublicGridCommand { public: DropCmd() : PublicGridCommand( "drop" ) {} - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string collection = 
cmdObj.firstElement().valuestrsafe(); string fullns = dbName + "." + collection; @@ -246,7 +303,7 @@ namespace mongo { class DropDBCmd : public PublicGridCommand { public: DropDBCmd() : PublicGridCommand( "dropDatabase" ) {} - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { BSONElement e = cmdObj.firstElement(); @@ -275,7 +332,7 @@ namespace mongo { class RenameCollectionCmd : public PublicGridCommand { public: RenameCollectionCmd() : PublicGridCommand( "renameCollection" ) {} - bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string fullnsFrom = cmdObj.firstElement().valuestrsafe(); string dbNameFrom = nsToDatabase( fullnsFrom.c_str() ); DBConfigPtr confFrom = grid.getDBConfig( dbNameFrom , false ); @@ -300,7 +357,7 @@ namespace mongo { class CopyDBCmd : public PublicGridCommand { public: CopyDBCmd() : PublicGridCommand( "copydb" ) {} - bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string todb = cmdObj.getStringField("todb"); uassert(13402, "need a todb argument", !todb.empty()); @@ -336,7 +393,8 @@ namespace mongo { class CountCmd : public PublicGridCommand { public: CountCmd() : PublicGridCommand("count") { } - bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool l) { + virtual bool passOptions() const { return true; } + bool run(const string& dbName, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { string collection = cmdObj.firstElement().valuestrsafe(); string fullns = dbName + "." + collection; @@ -345,12 +403,11 @@ namespace mongo { filter = cmdObj["query"].Obj(); DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) { ShardConnection conn( conf->getPrimary() , fullns ); BSONObj temp; - bool ok = conn->runCommand( dbName , cmdObj , temp ); + bool ok = conn->runCommand( dbName , cmdObj , temp, options ); conn.done(); if ( ok ) { @@ -365,7 +422,7 @@ namespace mongo { } // this collection got sharded - ChunkManagerPtr cm = conf->getChunkManager( fullns , true ); + ChunkManagerPtr cm = conf->getChunkManagerIfExists( fullns , true ); if ( ! cm ) { errmsg = "should be sharded now"; result.append( "root" , temp ); @@ -376,11 +433,11 @@ namespace mongo { long long total = 0; map<string,long long> shardCounts; - ChunkManagerPtr cm = conf->getChunkManager( fullns ); + ChunkManagerPtr cm = conf->getChunkManagerIfExists( fullns ); while ( true ) { if ( ! 
cm ) { // probably unsharded now - return run( dbName , cmdObj , errmsg , result , l ); + return run( dbName , cmdObj , options , errmsg , result, false ); } set<Shard> shards; @@ -394,14 +451,14 @@ namespace mongo { if ( conn.setVersion() ) { total = 0; shardCounts.clear(); - cm = conf->getChunkManager( fullns ); + cm = conf->getChunkManagerIfExists( fullns ); conn.done(); hadToBreak = true; break; } BSONObj temp; - bool ok = conn->runCommand( dbName , BSON( "count" << collection << "query" << filter ) , temp ); + bool ok = conn->runCommand( dbName , BSON( "count" << collection << "query" << filter ) , temp, options ); conn.done(); if ( ok ) { @@ -415,7 +472,7 @@ namespace mongo { // my version is old total = 0; shardCounts.clear(); - cm = conf->getChunkManager( fullns , true ); + cm = conf->getChunkManagerIfExists( fullns , true ); hadToBreak = true; break; } @@ -442,14 +499,13 @@ namespace mongo { class CollectionStats : public PublicGridCommand { public: CollectionStats() : PublicGridCommand("collStats", "collstats") { } - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string collection = cmdObj.firstElement().valuestrsafe(); string fullns = dbName + "." + collection; DBConfigPtr conf = grid.getDBConfig( dbName , false ); if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) { - result.append( "ns" , fullns ); result.appendBool("sharded", false); result.append( "primary" , conf->getPrimary().getName() ); return passthrough( conf , cmdObj , result); @@ -463,9 +519,13 @@ namespace mongo { cm->getAllShards(servers); BSONObjBuilder shardStats; + map<string,long long> counts; + map<string,long long> indexSizes; + /* long long count=0; long long size=0; long long storageSize=0; + */ int nindexes=0; bool warnedAboutIndexes = false; for ( set<Shard>::iterator i=servers.begin(); i!=servers.end(); i++ ) { @@ -476,39 +536,82 @@ namespace mongo { return false; } conn.done(); - - count += res["count"].numberLong(); - size += res["size"].numberLong(); - storageSize += res["storageSize"].numberLong(); - - int myIndexes = res["nindexes"].numberInt(); - - if ( nindexes == 0 ) { - nindexes = myIndexes; - } - else if ( nindexes == myIndexes ) { - // no-op - } - else { - // hopefully this means we're building an index - - if ( myIndexes > nindexes ) - nindexes = myIndexes; - - if ( ! warnedAboutIndexes ) { - result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" ); - warnedAboutIndexes = true; + + BSONObjIterator j( res ); + while ( j.more() ) { + BSONElement e = j.next(); + + if ( str::equals( e.fieldName() , "ns" ) || + str::equals( e.fieldName() , "ok" ) || + str::equals( e.fieldName() , "avgObjSize" ) || + str::equals( e.fieldName() , "lastExtentSize" ) || + str::equals( e.fieldName() , "paddingFactor" ) ) { + continue; + } + else if ( str::equals( e.fieldName() , "count" ) || + str::equals( e.fieldName() , "size" ) || + str::equals( e.fieldName() , "storageSize" ) || + str::equals( e.fieldName() , "numExtents" ) || + str::equals( e.fieldName() , "totalIndexSize" ) ) { + counts[e.fieldName()] += e.numberLong(); + } + else if ( str::equals( e.fieldName() , "indexSizes" ) ) { + BSONObjIterator k( e.Obj() ); + while ( k.more() ) { + BSONElement temp = k.next(); + indexSizes[temp.fieldName()] += temp.numberLong(); + } + } + else if ( str::equals( e.fieldName() , "flags" ) ) { + if ( ! 
result.hasField( e.fieldName() ) ) + result.append( e ); } + else if ( str::equals( e.fieldName() , "nindexes" ) ) { + int myIndexes = e.numberInt(); + + if ( nindexes == 0 ) { + nindexes = myIndexes; + } + else if ( nindexes == myIndexes ) { + // no-op + } + else { + // hopefully this means we're building an index + + if ( myIndexes > nindexes ) + nindexes = myIndexes; + + if ( ! warnedAboutIndexes ) { + result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" ); + warnedAboutIndexes = true; + } + } + } + else { + warning() << "mongos collstats doesn't know about: " << e.fieldName() << endl; + } + } - shardStats.append(i->getName(), res); } result.append("ns", fullns); - result.appendNumber("count", count); - result.appendNumber("size", size); - result.append ("avgObjSize", double(size) / double(count)); - result.appendNumber("storageSize", storageSize); + + for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); ++i ) + result.appendNumber( i->first , i->second ); + + { + BSONObjBuilder ib( result.subobjStart( "indexSizes" ) ); + for ( map<string,long long>::iterator i=indexSizes.begin(); i!=indexSizes.end(); ++i ) + ib.appendNumber( i->first , i->second ); + ib.done(); + } + + if ( counts["count"] > 0 ) + result.append("avgObjSize", (double)counts["size"] / (double)counts["count"] ); + else + result.append( "avgObjSize", 0.0 ); + result.append("nindexes", nindexes); result.append("nchunks", cm->numChunks()); @@ -521,7 +624,7 @@ namespace mongo { class FindAndModifyCmd : public PublicGridCommand { public: FindAndModifyCmd() : PublicGridCommand("findAndModify", "findandmodify") { } - bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string collection = cmdObj.firstElement().valuestrsafe(); string fullns = dbName + "." + collection; @@ -532,7 +635,7 @@ namespace mongo { } ChunkManagerPtr cm = conf->getChunkManager( fullns ); - massert( 13002 , "how could chunk manager be null!" , cm ); + massert( 13002 , "shard internal error chunk manager should never be null" , cm ); BSONObj filter = cmdObj.getObjectField("query"); uassert(13343, "query for sharded findAndModify must have shardkey", cm->hasShardKey(filter)); @@ -558,7 +661,7 @@ namespace mongo { class DataSizeCmd : public PublicGridCommand { public: DataSizeCmd() : PublicGridCommand("dataSize", "datasize") { } - bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string fullns = cmdObj.firstElement().String(); DBConfigPtr conf = grid.getDBConfig( dbName , false ); @@ -622,7 +725,7 @@ namespace mongo { class GroupCmd : public NotAllowedOnShardedCollectionCmd { public: GroupCmd() : NotAllowedOnShardedCollectionCmd("group") {} - + virtual bool passOptions() const { return true; } virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) { return dbName + "." 
+ cmdObj.firstElement().embeddedObjectUserCheck()["ns"].valuestrsafe(); } @@ -635,14 +738,15 @@ namespace mongo { virtual void help( stringstream &help ) const { help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }"; } - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool passOptions() const { return true; } + bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { string collection = cmdObj.firstElement().valuestrsafe(); string fullns = dbName + "." + collection; DBConfigPtr conf = grid.getDBConfig( dbName , false ); if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) { - return passthrough( conf , cmdObj , result ); + return passthrough( conf , cmdObj , options, result ); } ChunkManagerPtr cm = conf->getChunkManager( fullns ); @@ -658,7 +762,7 @@ namespace mongo { for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ) { ShardConnection conn( *i , fullns ); BSONObj res; - bool ok = conn->runCommand( conf->getName() , cmdObj , res ); + bool ok = conn->runCommand( conf->getName() , cmdObj , res, options ); conn.done(); if ( ! ok ) { @@ -693,7 +797,7 @@ namespace mongo { virtual void help( stringstream &help ) const { help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }"; } - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string fullns = dbName; fullns += "."; { @@ -730,15 +834,15 @@ namespace mongo { public: Geo2dFindNearCmd() : PublicGridCommand( "geoNear" ) {} void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; } - - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool passOptions() const { return true; } + bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { string collection = cmdObj.firstElement().valuestrsafe(); string fullns = dbName + "." + collection; DBConfigPtr conf = grid.getDBConfig( dbName , false ); if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) { - return passthrough( conf , cmdObj , result ); + return passthrough( conf , cmdObj , options, result ); } ChunkManagerPtr cm = conf->getChunkManager( fullns ); @@ -755,7 +859,7 @@ namespace mongo { list< shared_ptr<Future::CommandResult> > futures; BSONArrayBuilder shardArray; for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) { - futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj ) ); + futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, options ) ); shardArray.append(i->getName()); } @@ -820,12 +924,13 @@ namespace mongo { class MRCmd : public PublicGridCommand { public: + AtomicUInt JOB_NUMBER; + MRCmd() : PublicGridCommand( "mapreduce" ) {} string getTmpName( const string& coll ) { - static int inc = 1; stringstream ss; - ss << "tmp.mrs." << coll << "_" << time(0) << "_" << inc++; + ss << "tmp.mrs." 
<< coll << "_" << time(0) << "_" << JOB_NUMBER++; return ss.str(); } @@ -851,8 +956,8 @@ namespace mongo { if (fn == "out" && e.type() == Object) { // check if there is a custom output BSONObj out = e.embeddedObject(); - if (out.hasField("db")) - customOut = out; +// if (out.hasField("db")) + customOut = out; } } else { @@ -864,7 +969,7 @@ namespace mongo { return b.obj(); } - bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { Timer t; string collection = cmdObj.firstElement().valuestrsafe(); @@ -876,7 +981,7 @@ namespace mongo { BSONObj customOut; BSONObj shardedCommand = fixForShards( cmdObj , shardedOutputCollection, customOut , badShardedField ); - bool customOutDB = ! customOut.isEmpty() && customOut.hasField( "db" ); + bool customOutDB = customOut.hasField( "db" ); DBConfigPtr conf = grid.getDBConfig( dbName , false ); @@ -911,26 +1016,32 @@ namespace mongo { finalCmd.append( "shardedOutputCollection" , shardedOutputCollection ); + set<ServerAndQuery> servers; + BSONObj shardCounts; + BSONObj aggCounts; + map<string,long long> countsMap; { // we need to use our connections to the shard // so filtering is done correctly for un-owned docs // so we allocate them in our thread // and hand off - + // Note: why not use pooled connections? This has been reported to create too many connections vector< shared_ptr<ShardConnection> > shardConns; - list< shared_ptr<Future::CommandResult> > futures; for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) { shared_ptr<ShardConnection> temp( new ShardConnection( i->getConnString() , fullns ) ); assert( temp->get() ); - futures.push_back( Future::spawnCommand( i->getConnString() , dbName , shardedCommand , temp->get() ) ); + futures.push_back( Future::spawnCommand( i->getConnString() , dbName , shardedCommand , 0 , temp->get() ) ); shardConns.push_back( temp ); } bool failed = false; - - BSONObjBuilder shardresults; + + // now wait for the result of all shards + BSONObjBuilder shardResultsB; + BSONObjBuilder shardCountsB; + BSONObjBuilder aggCountsB; for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) { shared_ptr<Future::CommandResult> res = *i; if ( ! 
res->join() ) { @@ -941,7 +1052,19 @@ namespace mongo { failed = true; continue; } - shardresults.append( res->getServer() , res->result() ); + BSONObj result = res->result(); + shardResultsB.append( res->getServer() , result ); + BSONObj counts = result["counts"].embeddedObjectUserCheck(); + shardCountsB.append( res->getServer() , counts ); + servers.insert(res->getServer()); + + // add up the counts for each shard + // some of them will be fixed later like output and reduce + BSONObjIterator j( counts ); + while ( j.more() ) { + BSONElement temp = j.next(); + countsMap[temp.fieldName()] += temp.numberLong(); + } } for ( unsigned i=0; i<shardConns.size(); i++ ) @@ -950,28 +1073,205 @@ namespace mongo { if ( failed ) return 0; - finalCmd.append( "shards" , shardresults.obj() ); + finalCmd.append( "shards" , shardResultsB.obj() ); + shardCounts = shardCountsB.obj(); + finalCmd.append( "shardCounts" , shardCounts ); timingBuilder.append( "shards" , t.millis() ); + + for ( map<string,long long>::iterator i=countsMap.begin(); i!=countsMap.end(); i++ ) { + aggCountsB.append( i->first , i->second ); + } + aggCounts = aggCountsB.obj(); + finalCmd.append( "counts" , aggCounts ); } Timer t2; - // by default the target database is same as input - Shard outServer = conf->getPrimary(); - string outns = fullns; - if ( customOutDB ) { - // have to figure out shard for the output DB - BSONElement elmt = customOut.getField("db"); - string outdb = elmt.valuestrsafe(); - outns = outdb + "." + collection; - DBConfigPtr conf2 = grid.getDBConfig( outdb , true ); - outServer = conf2->getPrimary(); - } - log() << "customOut: " << customOut << " outServer: " << outServer << endl; - - ShardConnection conn( outServer , outns ); BSONObj finalResult; - bool ok = conn->runCommand( dbName , finalCmd.obj() , finalResult ); - conn.done(); + bool ok = false; + string outdb = dbName; + if (customOutDB) { + BSONElement elmt = customOut.getField("db"); + outdb = elmt.valuestrsafe(); + } + + if (!customOut.getBoolField("sharded")) { + // non-sharded, use the MRFinish command on target server + // This will save some data transfer + + // by default the target database is same as input + Shard outServer = conf->getPrimary(); + string outns = fullns; + if ( customOutDB ) { + // have to figure out shard for the output DB + DBConfigPtr conf2 = grid.getDBConfig( outdb , true ); + outServer = conf2->getPrimary(); + outns = outdb + "." + collection; + } + log() << "customOut: " << customOut << " outServer: " << outServer << endl; + + ShardConnection conn( outServer , outns ); + ok = conn->runCommand( dbName , finalCmd.obj() , finalResult ); + conn.done(); + } else { + // grab records from each shard and insert back in correct shard in "temp" collection + // we do the final reduce in mongos since records are ordered and already reduced on each shard +// string shardedIncLong = str::stream() << outdb << ".tmp.mr." 
<< collection << "_" << "shardedTemp" << "_" << time(0) << "_" << JOB_NUMBER++; + + mr_shard::Config config( dbName , cmdObj ); + mr_shard::State state(config); + LOG(1) << "mr sharded output ns: " << config.ns << endl; + + if (config.outType == mr_shard::Config::INMEMORY) { + errmsg = "This Map Reduce mode is not supported with sharded output"; + return false; + } + + if (!config.outDB.empty()) { + BSONObjBuilder loc; + if ( !config.outDB.empty()) + loc.append( "db" , config.outDB ); + loc.append( "collection" , config.finalShort ); + result.append("result", loc.obj()); + } + else { + if ( !config.finalShort.empty() ) + result.append( "result" , config.finalShort ); + } + + string outns = config.finalLong; + string tempns; + + // result will be inserted into a temp collection to post process + const string postProcessCollection = getTmpName( collection ); + finalCmd.append("postProcessCollection", postProcessCollection); + tempns = dbName + "." + postProcessCollection; + +// if (config.outType == mr_shard::Config::REPLACE) { +// // drop previous collection +// BSONObj dropColCmd = BSON("drop" << config.finalShort); +// BSONObjBuilder dropColResult(32); +// string outdbCmd = outdb + ".$cmd"; +// bool res = Command::runAgainstRegistered(outdbCmd.c_str(), dropColCmd, dropColResult); +// if (!res) { +// errmsg = str::stream() << "Could not drop sharded output collection " << outns << ": " << dropColResult.obj().toString(); +// return false; +// } +// } + + BSONObj sortKey = BSON( "_id" << 1 ); + if (!conf->isSharded(outns)) { + // create the sharded collection + + BSONObj shardColCmd = BSON("shardCollection" << outns << "key" << sortKey); + BSONObjBuilder shardColResult(32); + bool res = Command::runAgainstRegistered("admin.$cmd", shardColCmd, shardColResult); + if (!res) { + errmsg = str::stream() << "Could not create sharded output collection " << outns << ": " << shardColResult.obj().toString(); + return false; + } + } + + ParallelSortClusteredCursor cursor( servers , dbName + "." 
+ shardedOutputCollection , + Query().sort( sortKey ) ); + cursor.init(); + state.init(); + + mr_shard::BSONList values; + Strategy* s = SHARDED; + long long finalCount = 0; + int currentSize = 0; + while ( cursor.more() || !values.empty() ) { + BSONObj t; + if ( cursor.more() ) { + t = cursor.next().getOwned(); + + if ( values.size() == 0 || t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) { + values.push_back( t ); + currentSize += t.objsize(); + + // check size and potentially reduce + if (currentSize > config.maxInMemSize && values.size() > config.reduceTriggerRatio) { + BSONObj reduced = config.reducer->finalReduce(values, 0); + values.clear(); + values.push_back( reduced ); + currentSize = reduced.objsize(); + } + continue; + } + } + + BSONObj final = config.reducer->finalReduce(values, config.finalizer.get()); + if (config.outType == mr_shard::Config::MERGE) { + BSONObj id = final["_id"].wrap(); + s->updateSharded(conf, outns.c_str(), id, final, UpdateOption_Upsert, true); + } else { + // insert into temp collection, but using final collection's shard chunks + s->insertSharded(conf, tempns.c_str(), final, 0, true, outns.c_str()); + } + ++finalCount; + values.clear(); + if (!t.isEmpty()) { + values.push_back( t ); + currentSize = t.objsize(); + } + } + + if (config.outType == mr_shard::Config::REDUCE || config.outType == mr_shard::Config::REPLACE) { + // results were written to temp collection, need post processing + vector< shared_ptr<ShardConnection> > shardConns; + list< shared_ptr<Future::CommandResult> > futures; + BSONObj finalCmdObj = finalCmd.obj(); + for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) { + shared_ptr<ShardConnection> temp( new ShardConnection( i->getConnString() , outns ) ); + futures.push_back( Future::spawnCommand( i->getConnString() , dbName , finalCmdObj , 0 , temp->get() ) ); + shardConns.push_back( temp ); + } + + // now wait for the result of all shards + bool failed = false; + for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) { + shared_ptr<Future::CommandResult> res = *i; + if ( ! res->join() ) { + error() << "final reduce on sharded output m/r failed on shard: " << res->getServer() << " error: " << res->result() << endl; + result.append( "cause" , res->result() ); + errmsg = "mongod mr failed: "; + errmsg += res->result().toString(); + failed = true; + continue; + } + BSONObj result = res->result(); + } + + for ( unsigned i=0; i<shardConns.size(); i++ ) + shardConns[i]->done(); + + if (failed) + return 0; + } + + for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) { + ScopedDbConnection conn( i->_server ); + conn->dropCollection( dbName + "." + shardedOutputCollection ); + conn.done(); + } + + result.append("shardCounts", shardCounts); + + // fix the global counts + BSONObjBuilder countsB(32); + BSONObjIterator j(aggCounts); + while (j.more()) { + BSONElement elmt = j.next(); + if (!strcmp(elmt.fieldName(), "reduce")) + countsB.append("reduce", elmt.numberLong() + state.numReduces()); + else if (!strcmp(elmt.fieldName(), "output")) + countsB.append("output", finalCount); + else + countsB.append(elmt); + } + result.append( "counts" , countsB.obj() ); + ok = true; + } if ( ! 
ok ) { errmsg = "final reduce failed: "; @@ -991,14 +1291,81 @@ namespace mongo { class ApplyOpsCmd : public PublicGridCommand { public: ApplyOpsCmd() : PublicGridCommand( "applyOps" ) {} - - virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { errmsg = "applyOps not allowed through mongos"; return false; } - } applyOpsCmd; + class CompactCmd : public PublicGridCommand { + public: + CompactCmd() : PublicGridCommand( "compact" ) {} + virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { + errmsg = "compact not allowed through mongos"; + return false; + } + } compactCmd; + } + bool Command::runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder, int queryOptions) { + const char *p = strchr(ns, '.'); + if ( !p ) return false; + if ( strcmp(p, ".$cmd") != 0 ) return false; + + bool ok = false; + + BSONElement e = jsobj.firstElement(); + map<string,Command*>::iterator i; + + if ( e.eoo() ) + ; + // check for properly registered command objects. + else if ( (i = _commands->find(e.fieldName())) != _commands->end() ) { + string errmsg; + Command *c = i->second; + ClientInfo *client = ClientInfo::get(); + AuthenticationInfo *ai = client->getAuthenticationInfo(); + + char cl[256]; + nsToDatabase(ns, cl); + if( c->requiresAuth() && !ai->isAuthorized(cl)) { + ok = false; + errmsg = "unauthorized"; + } + else if( c->adminOnly() && c->localHostOnlyIfNoAuth( jsobj ) && noauth && !ai->isLocalHost ) { + ok = false; + errmsg = "unauthorized: this command must run from localhost when running db without auth"; + log() << "command denied: " << jsobj.toString() << endl; + } + else if ( c->adminOnly() && !startsWith(ns, "admin.") ) { + ok = false; + errmsg = "access denied - use admin db"; + } + else if ( jsobj.getBoolField( "help" ) ) { + stringstream help; + help << "help for: " << e.fieldName() << " "; + c->help( help ); + anObjBuilder.append( "help" , help.str() ); + } + else { + ok = c->run( nsToDatabase( ns ) , jsobj, queryOptions, errmsg, anObjBuilder, false ); + } + + BSONObj tmp = anObjBuilder.asTempObj(); + bool have_ok = tmp.hasField("ok"); + bool have_errmsg = tmp.hasField("errmsg"); + + if (!have_ok) + anObjBuilder.append( "ok" , ok ? 
1.0 : 0.0 ); + + if ( !ok && !have_errmsg) { + anObjBuilder.append("errmsg", errmsg); + uassert_nothrow(errmsg.c_str()); + } + return true; + } + + return false; + } } diff --git a/s/config.cpp b/s/config.cpp index 0766717..23475eb 100644 --- a/s/config.cpp +++ b/s/config.cpp @@ -17,11 +17,9 @@ */ #include "pch.h" -#include "../util/message.h" +#include "../util/net/message.h" #include "../util/stringutils.h" #include "../util/unittest.h" -#include "../util/timer.h" - #include "../client/connpool.h" #include "../client/model.h" #include "../db/pdfile.h" @@ -56,19 +54,18 @@ namespace mongo { _dirty = false; _dropped = in["dropped"].trueValue(); if ( in["key"].isABSONObj() ) { - Timer t; - shard( in["_id"].String() , in["key"].Obj() , in["unique"].trueValue() ); - log() << "creating ChunkManager ns: " << in["_id"] - << " took: " << t.millis() << "ms" - << " sequenceNumber: " << _cm->getSequenceNumber() - << endl; - _dirty = false; + _key = in["key"].Obj().getOwned(); + _unqiue = in["unique"].trueValue(); + shard( in["_id"].String() , _key , _unqiue ); } + _dirty = false; } - + void DBConfig::CollectionInfo::shard( const string& ns , const ShardKeyPattern& key , bool unique ) { _cm.reset( new ChunkManager( ns , key , unique ) ); + _key = key.key().getOwned(); + _unqiue = unique; _dirty = true; _dropped = false; } @@ -77,6 +74,7 @@ namespace mongo { _cm.reset(); _dropped = true; _dirty = true; + _key = BSONObj(); } void DBConfig::CollectionInfo::save( const string& ns , DBClientBase* conn ) { @@ -96,32 +94,6 @@ namespace mongo { _dirty = false; } - bool DBConfig::CollectionInfo::needsReloading( DBClientBase * conn , const BSONObj& collectionInfo ) { - if ( ! _cm ) { - return true; - } - - if ( _dirty || _dropped ) { - return true; - } - - if ( collectionInfo["dropped"].trueValue() ) { - return true; - } - - BSONObj newest = conn->findOne( ShardNS::chunk , - Query( BSON( "ns" << collectionInfo["_id"].String() ) ).sort( "lastmod" , -1 ) ); - - if ( newest.isEmpty() ) { - // either a drop or something else weird - return true; - } - - ShardChunkVersion fromdb = newest["lastmod"]; - ShardChunkVersion inmemory = _cm->getVersion(); - return fromdb != inmemory; - } - bool DBConfig::isSharded( const string& ns ) { if ( ! _shardingEnabled ) return false; @@ -160,34 +132,36 @@ namespace mongo { ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ) { uassert( 8042 , "db doesn't have sharding enabled" , _shardingEnabled ); + uassert( 13648 , str::stream() << "can't shard collection because not all config servers are up" , configServer.allUp() ); - scoped_lock lk( _lock ); + + { + scoped_lock lk( _lock ); - CollectionInfo& ci = _collections[ns]; - uassert( 8043 , "collection already sharded" , ! ci.isSharded() ); + CollectionInfo& ci = _collections[ns]; + uassert( 8043 , "collection already sharded" , ! ci.isSharded() ); - log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl; + log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl; - // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first - // time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining - // chunks will be created as a by-product of splitting. 
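The shape of the rewrite below is worth noting: the in-memory metadata update and the config write are bracketed in their own scope, so the scoped_lock on _lock is destroyed before the follow-up chunking runs; maybeChunkCollection() talks to shards over the network, and the old layout risked holding a process-wide mutex across that I/O. Reduced to its skeleton (this paraphrases the hunk, it is not new behavior):

    {
        scoped_lock lk( _lock );                  // guard only the metadata update and _save()
        ci.shard( ns , fieldsAndOrder , unique );
        ci.getCM()->createFirstChunk( getPrimary() );
        _save();
    }                                             // lk destroyed: _lock released here
    // splitting involves remote calls, so it runs outside the critical section
    getChunkManager( ns , true )->maybeChunkCollection();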
- ci.shard( ns , fieldsAndOrder , unique ); - ChunkManagerPtr cm = ci.getCM(); - uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) ); - cm->createFirstChunk( getPrimary() ); - _save(); + // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first + // time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining + // chunks will be created as a by-product of splitting. + ci.shard( ns , fieldsAndOrder , unique ); + ChunkManagerPtr cm = ci.getCM(); + uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) ); + cm->createFirstChunk( getPrimary() ); + _save(); + } try { - if ( cm->maybeChunkCollection() ) { - _load(); - } + getChunkManager(ns, true)->maybeChunkCollection(); } catch ( UserException& e ) { // failure to chunk is not critical enough to abort the command (and undo the _save()'d configDB state) log() << "couldn't chunk recently created collection: " << ns << " " << e << endl; } - return cm; + return getChunkManager(ns); } bool DBConfig::removeSharding( const string& ns ) { @@ -207,18 +181,89 @@ namespace mongo { return false; ci.unshard(); - _save(); + _save( false, true ); return true; } + ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns, bool shouldReload ){ + try{ + return getChunkManager( ns, shouldReload ); + } + catch( AssertionException& e ){ + warning() << "chunk manager not found for " << ns << causedBy( e ) << endl; + return ChunkManagerPtr(); + } + } + ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ) { - scoped_lock lk( _lock ); + BSONObj key; + bool unique; + ShardChunkVersion oldVersion; - if ( shouldReload ) - _reload(); + { + scoped_lock lk( _lock ); + + CollectionInfo& ci = _collections[ns]; + + bool earlyReload = ! ci.isSharded() && shouldReload; + if ( earlyReload ) { + // this is to catch cases where this is a new sharded collection + _reload(); + ci = _collections[ns]; + } + massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() ); + assert( ! ci.key().isEmpty() ); + + if ( ! shouldReload || earlyReload ) + return ci.getCM(); + key = ci.key().copy(); + unique = ci.unique(); + if ( ci.getCM() ) + oldVersion = ci.getCM()->getVersion(); + } + + assert( ! key.isEmpty() ); + + if ( oldVersion > 0 ) { + ScopedDbConnection conn( configServer.modelServer() , 30.0 ); + BSONObj newest = conn->findOne( ShardNS::chunk , + Query( BSON( "ns" << ns ) ).sort( "lastmod" , -1 ) ); + conn.done(); + + if ( !
newest.isEmpty() ) { + ShardChunkVersion v = newest["lastmod"]; + if ( v == oldVersion ) { + scoped_lock lk( _lock ); + CollectionInfo& ci = _collections[ns]; + massert( 15885 , str::stream() << "not sharded after reloading from chunks : " << ns , ci.isSharded() ); + return ci.getCM(); + } + } + + } + + // we are not locked now, and want to load a new ChunkManager + + auto_ptr<ChunkManager> temp( new ChunkManager( ns , key , unique ) ); + if ( temp->numChunks() == 0 ) { + // maybe we're not sharded any more + reload(); // this is a full reload + return getChunkManager( ns , false ); + } + + scoped_lock lk( _lock ); + CollectionInfo& ci = _collections[ns]; - massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() || ci.wasDropped() ); + massert( 14822 , (string)"state changed in the middle: " + ns , ci.isSharded() ); + + if ( temp->getVersion() > ci.getCM()->getVersion() ) { + // we only want to reset if we're newer + // otherwise we go into a bad cycle + ci.resetCM( temp.release() ); + } + + massert( 15883 , str::stream() << "not sharded after chunk manager reset : " << ns , ci.isSharded() ); return ci.getCM(); } @@ -235,7 +280,7 @@ namespace mongo { } void DBConfig::unserialize(const BSONObj& from) { - log(1) << "DBConfig unserialize: " << _name << " " << from << endl; + LOG(1) << "DBConfig unserialize: " << _name << " " << from << endl; assert( _name == from["_id"].String() ); _shardingEnabled = from.getBoolField("partitioned"); @@ -255,7 +300,7 @@ namespace mongo { } bool DBConfig::_load() { - ScopedDbConnection conn( configServer.modelServer() ); + ScopedDbConnection conn( configServer.modelServer(), 30.0 ); BSONObj o = conn->findOne( ShardNS::database , BSON( "_id" << _name ) ); @@ -273,14 +318,8 @@ namespace mongo { assert( cursor.get() ); while ( cursor->more() ) { BSONObj o = cursor->next(); - string ns = o["_id"].String(); - - Collections::iterator i = _collections.find( ns ); - if ( i != _collections.end() && ! i->second.needsReloading( conn.get() , o ) ) { - continue; - } - - _collections[ns] = CollectionInfo( o ); + if( o["dropped"].trueValue() ) _collections.erase( o["_id"].String() ); + else _collections[o["_id"].String()] = CollectionInfo( o ); } conn.done(); @@ -288,24 +327,32 @@ namespace mongo { return true; } - void DBConfig::_save() { - ScopedDbConnection conn( configServer.modelServer() ); + void DBConfig::_save( bool db, bool coll ) { + ScopedDbConnection conn( configServer.modelServer(), 30.0 ); + + if( db ){ + + BSONObj n; + { + BSONObjBuilder b; + serialize(b); + n = b.obj(); + } + + conn->update( ShardNS::database , BSON( "_id" << _name ) , n , true ); + string err = conn->getLastError(); + uassert( 13396 , (string)"DBConfig save failed: " + err , err.size() == 0 ); - BSONObj n; - { - BSONObjBuilder b; - serialize(b); - n = b.obj(); } - conn->update( ShardNS::database , BSON( "_id" << _name ) , n , true ); - string err = conn->getLastError(); - uassert( 13396 , (string)"DBConfig save failed: " + err , err.size() == 0 ); + if( coll ){ + + for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ) { + if ( ! i->second.isDirty() ) + continue; + i->second.save( i->first , conn.get() ); + } - for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ) { - if ( ! i->second.isDirty() ) - continue; - i->second.save( i->first , conn.get() ); } conn.done(); @@ -335,14 +382,14 @@ namespace mongo { // 1 if ( ! 
configServer.allUp( errmsg ) ) { - log(1) << "\t DBConfig::dropDatabase not all up" << endl; + LOG(1) << "\t DBConfig::dropDatabase not all up" << endl; return 0; } // 2 grid.removeDB( _name ); { - ScopedDbConnection conn( configServer.modelServer() ); + ScopedDbConnection conn( configServer.modelServer(), 30.0 ); conn->remove( ShardNS::database , BSON( "_id" << _name ) ); errmsg = conn->getLastError(); if ( ! errmsg.empty() ) { @@ -358,7 +405,7 @@ namespace mongo { log() << "error removing from config server even after checking!" << endl; return 0; } - log(1) << "\t removed entry from config server for: " << _name << endl; + LOG(1) << "\t removed entry from config server for: " << _name << endl; set<Shard> allServers; @@ -374,7 +421,7 @@ namespace mongo { // 4 { - ScopedDbConnection conn( _primary ); + ScopedDbConnection conn( _primary, 30.0 ); BSONObj res; if ( ! conn->dropDatabase( _name , &res ) ) { errmsg = res.toString(); @@ -385,7 +432,7 @@ namespace mongo { // 5 for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ) { - ScopedDbConnection conn( *i ); + ScopedDbConnection conn( *i, 30.0 ); BSONObj res; if ( ! conn->dropDatabase( _name , &res ) ) { errmsg = res.toString(); @@ -394,7 +441,7 @@ namespace mongo { conn.done(); } - log(1) << "\t dropped primary db for: " << _name << endl; + LOG(1) << "\t dropped primary db for: " << _name << endl; configServer.logChange( "dropDatabase" , _name , BSONObj() ); return true; @@ -406,6 +453,7 @@ namespace mongo { while ( true ) { Collections::iterator i = _collections.begin(); for ( ; i != _collections.end(); ++i ) { + // log() << "coll : " << i->first << " and " << i->second.isSharded() << endl; if ( i->second.isSharded() ) break; } @@ -419,7 +467,7 @@ namespace mongo { } seen.insert( i->first ); - log(1) << "\t dropping sharded collection: " << i->first << endl; + LOG(1) << "\t dropping sharded collection: " << i->first << endl; i->second.getCM()->getAllShards( allServers ); i->second.getCM()->drop( i->second.getCM() ); @@ -427,13 +475,14 @@ namespace mongo { num++; uassert( 10184 , "_dropShardedCollections too many collections - bailing" , num < 100000 ); - log(2) << "\t\t dropped " << num << " so far" << endl; + LOG(2) << "\t\t dropped " << num << " so far" << endl; } return true; } void DBConfig::getAllShards(set<Shard>& shards) const { + scoped_lock lk( _lock ); shards.insert(getPrimary()); for (Collections::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it) { if (it->second.isSharded()) { @@ -493,15 +542,12 @@ namespace mongo { string fullString; joinStringDelim( configHosts, &fullString, ',' ); _primary.setAddress( ConnectionString( fullString , ConnectionString::SYNC ) ); - log(1) << " config string : " << fullString << endl; + LOG(1) << " config string : " << fullString << endl; return true; } bool ConfigServer::checkConfigServersConsistent( string& errmsg , int tries ) const { - if ( _config.size() == 1 ) - return true; - if ( tries <= 0 ) return false; @@ -511,7 +557,16 @@ namespace mongo { for ( unsigned i=0; i<_config.size(); i++ ) { BSONObj x; try { - ScopedDbConnection conn( _config[i] ); + ScopedDbConnection conn( _config[i], 30.0 ); + + // check auth + conn->update("config.foo.bar", BSONObj(), BSON("x" << 1)); + conn->simpleCommand( "admin", &x, "getlasterror"); + if (x["err"].type() == String && x["err"].String() == "unauthorized") { + errmsg = "not authorized, did you start with --keyFile?"; + return false; + } + if ( ! 
conn->simpleCommand( "config" , &x , "dbhash" ) ) x = BSONObj(); else { @@ -528,6 +583,9 @@ namespace mongo { res.push_back(x); } + if ( _config.size() == 1 ) + return true; + if ( up == 0 ) { errmsg = "no config servers reachable"; return false; @@ -574,7 +632,7 @@ namespace mongo { if ( checkConsistency ) { string errmsg; if ( ! checkConfigServersConsistent( errmsg ) ) { - log( LL_ERROR ) << "config servers not in sync! " << errmsg << endl; + log( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings; return false; } } @@ -589,7 +647,7 @@ namespace mongo { bool ConfigServer::allUp( string& errmsg ) { try { - ScopedDbConnection conn( _primary ); + ScopedDbConnection conn( _primary, 30.0 ); conn->getLastError(); conn.done(); return true; @@ -603,7 +661,7 @@ namespace mongo { } int ConfigServer::dbConfigVersion() { - ScopedDbConnection conn( _primary ); + ScopedDbConnection conn( _primary, 30.0 ); int version = dbConfigVersion( conn.conn() ); conn.done(); return version; @@ -629,7 +687,7 @@ namespace mongo { void ConfigServer::reloadSettings() { set<string> got; - ScopedDbConnection conn( _primary ); + ScopedDbConnection conn( _primary, 30.0 ); auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() ); assert( c.get() ); while ( c->more() ) { @@ -637,7 +695,7 @@ namespace mongo { string name = o["_id"].valuestrsafe(); got.insert( name ); if ( name == "chunksize" ) { - log(1) << "MaxChunkSize: " << o["value"] << endl; + LOG(1) << "MaxChunkSize: " << o["value"] << endl; Chunk::MaxChunkSize = o["value"].numberInt() * 1024 * 1024; } else if ( name == "balancer" ) { @@ -703,7 +761,7 @@ namespace mongo { assert( _primary.ok() ); - ScopedDbConnection conn( _primary ); + ScopedDbConnection conn( _primary, 30.0 ); static bool createdCapped = false; if ( ! 
createdCapped ) { @@ -711,7 +769,7 @@ namespace mongo { conn->createCollection( "config.changelog" , 1024 * 1024 * 10 , true ); } catch ( UserException& e ) { - log(1) << "couldn't create changelog (like race condition): " << e << endl; + LOG(1) << "couldn't create changelog (like race condition): " << e << endl; // don't care } createdCapped = true; @@ -731,7 +789,7 @@ namespace mongo { void ConfigServer::replicaSetChange( const ReplicaSetMonitor * monitor ) { try { - ScopedDbConnection conn( configServer.getConnectionString() ); + ScopedDbConnection conn( configServer.getConnectionString(), 30.0 ); conn->update( ShardNS::shard , BSON( "_id" << monitor->getName() ) , BSON( "$set" << BSON( "host" << monitor->getServerAddress() ) ) ); conn.done(); } @@ -81,17 +81,27 @@ namespace mongo { return _cm; } + void resetCM( ChunkManager * cm ) { + assert(cm); + assert(_cm); // this has to be already sharded + _cm.reset( cm ); + } + void shard( const string& ns , const ShardKeyPattern& key , bool unique ); void unshard(); bool isDirty() const { return _dirty; } bool wasDropped() const { return _dropped; } - + void save( const string& ns , DBClientBase* conn ); - bool needsReloading( DBClientBase * conn , const BSONObj& collectionInfo ); + bool unique() const { return _unqiue; } + BSONObj key() const { return _key; } + private: + BSONObj _key; + bool _unqiue; ChunkManagerPtr _cm; bool _dirty; bool _dropped; @@ -133,6 +143,7 @@ namespace mongo { bool isSharded( const string& ns ); ChunkManagerPtr getChunkManager( const string& ns , bool reload = false ); + ChunkManagerPtr getChunkManagerIfExists( const string& ns , bool reload = false ); /** * @return the correct for shard for the ns @@ -172,7 +183,7 @@ namespace mongo { bool _load(); bool _reload(); - void _save(); + void _save( bool db = true, bool coll = true ); string _name; // e.g. "alleyinsider" Shard _primary; // e.g. localhost , mongo.foo.com:9999 @@ -183,7 +194,7 @@ namespace mongo { Collections _collections; - mongo::mutex _lock; // TODO: change to r/w lock ?? + mutable mongo::mutex _lock; // TODO: change to r/w lock ?? }; class ConfigServer : public DBConfig { diff --git a/s/config_migrate.cpp b/s/config_migrate.cpp index 57890a0..fff023c 100644 --- a/s/config_migrate.cpp +++ b/s/config_migrate.cpp @@ -17,7 +17,7 @@ */ #include "pch.h" -#include "../util/message.h" +#include "../util/net/message.h" #include "../util/unittest.h" #include "../client/connpool.h" #include "../client/model.h" diff --git a/s/cursors.cpp b/s/cursors.cpp index cf2735b..12b3d5e 100644 --- a/s/cursors.cpp +++ b/s/cursors.cpp @@ -22,6 +22,7 @@ #include "../db/queryutil.h" #include "../db/commands.h" #include "../util/concurrency/task.h" +#include "../util/net/listen.h" namespace mongo { @@ -111,7 +112,7 @@ namespace mongo { } bool hasMore = sendMore && _cursor->more(); - log(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl; + LOG(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl; replyToQuery( 0 , r.p() , r.m() , b.buf() , b.len() , num , _totalSent , hasMore ? getId() : 0 ); _totalSent += num; @@ -130,13 +131,15 @@ namespace mongo { CursorCache::~CursorCache() { // TODO: delete old cursors? 
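Two cursors.cpp fixes follow. The destructor stops abusing the log level to force output at shutdown, and further down the idle-cursor sweep no longer reuses an iterator that std::map::erase has just invalidated: the patch restarts the scan from begin() after each erase, accepting (per its own comment) that an entry may be skipped until the next pass. The canonical alternative is the post-increment erase idiom, shown here with simplified types purely as an illustration, not as the patch's code:

    #include <map>

    typedef std::map<long long, bool> CursorMap;   // cursor id -> "idle" flag (illustrative)

    void sweepIdle( CursorMap& cursors ) {
        for ( CursorMap::iterator i = cursors.begin(); i != cursors.end(); ) {
            if ( i->second )
                cursors.erase( i++ );   // advance first; only the erased iterator is invalidated
            else
                ++i;
        }
    }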
- int logLevel = 1; + bool print = logLevel > 0; if ( _cursors.size() || _refs.size() ) - logLevel = 0; - log( logLevel ) << " CursorCache at shutdown - " - << " sharded: " << _cursors.size() - << " passthrough: " << _refs.size() - << endl; + print = true; + + if ( print ) + cout << " CursorCache at shutdown - " + << " sharded: " << _cursors.size() + << " passthrough: " << _refs.size() + << endl; } ShardedClientCursorPtr CursorCache::get( long long id ) const { @@ -184,7 +187,7 @@ namespace mongo { long long CursorCache::genId() { while ( true ) { - long long x = security.getNonce(); + long long x = Security::getNonce(); if ( x == 0 ) continue; if ( x < 0 ) @@ -272,6 +275,9 @@ namespace mongo { } log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1) _cursors.erase( i ); + i = _cursors.begin(); // possible 2nd entry will get skipped, will get on next pass + if ( i == _cursors.end() ) + break; } } @@ -299,7 +305,7 @@ namespace mongo { help << " example: { cursorInfo : 1 }"; } virtual LockType locktype() const { return NONE; } - bool run(const string&, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { + bool run(const string&, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { cursorCache.appendInfo( result ); if ( jsobj["setTimeout"].isNumber() ) CursorCache::TIMEOUT = jsobj["setTimeout"].numberLong(); diff --git a/s/d_chunk_manager.cpp b/s/d_chunk_manager.cpp index d4fea30..82a06f6 100644 --- a/s/d_chunk_manager.cpp +++ b/s/d_chunk_manager.cpp @@ -21,6 +21,7 @@ #include "../client/connpool.h" #include "../client/dbclientmockcursor.h" #include "../db/instance.h" +#include "../db/clientcursor.h" #include "d_chunk_manager.h" @@ -29,7 +30,7 @@ namespace mongo { ShardChunkManager::ShardChunkManager( const string& configServer , const string& ns , const string& shardName ) { // have to get a connection to the config db - // special case if i'm the configdb since i'm locked and if i connect to myself + // special case if I'm the configdb since I'm locked and if I connect to myself // its a deadlock scoped_ptr<ScopedDbConnection> scoped; scoped_ptr<DBDirectClient> direct; @@ -112,7 +113,7 @@ namespace mongo { BSONObj currMax = it->second; ++it; - // coallesce the chunk's bounds in ranges if they are adjacent chunks + // coalesce the chunk's bounds in ranges if they are adjacent chunks if ( min.isEmpty() ) { min = currMin; max = currMax; @@ -136,13 +137,23 @@ namespace mongo { static bool contains( const BSONObj& min , const BSONObj& max , const BSONObj& point ) { return point.woCompare( min ) >= 0 && point.woCompare( max ) < 0; } + + bool ShardChunkManager::belongsToMe( ClientCursor* cc ) const { + verify( 15851 , cc ); + if ( _rangesMap.size() == 0 ) + return false; + + return _belongsToMe( cc->extractFields( _key , true ) ); + } bool ShardChunkManager::belongsToMe( const BSONObj& obj ) const { if ( _rangesMap.size() == 0 ) return false; - BSONObj x = obj.extractFields(_key); + return _belongsToMe( obj.extractFields( _key , true ) ); + } + bool ShardChunkManager::_belongsToMe( const BSONObj& x ) const { RangeMap::const_iterator it = _rangesMap.upper_bound( x ); if ( it != _rangesMap.begin() ) it--; @@ -206,7 +217,7 @@ namespace mongo { ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ShardChunkVersion& version ) { - // check that we have the exact chunk that'll be subtracted + // check that we have the exact chunk that will be 
subtracted
        _assertChunkExists( min , max );

        auto_ptr<ShardChunkManager> p( new ShardChunkManager );
@@ -282,14 +293,14 @@ namespace mongo {
        //
        // TODO drop the uniqueness constraint and tighten the check below so that only the minor portion of version changes
        if ( version <= _version ) {
-            uasserted( 13592 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
+            uasserted( 14039 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
        }

-        // check that we have the exact chunk that'll be split and that the split point is valid
+        // check that we have the exact chunk that will be split and that the split point is valid
        _assertChunkExists( min , max );
        for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
            if ( ! contains( min , max , *it ) ) {
-                uasserted( 13593 , str::stream() << "can split " << min << " -> " << max << " on " << *it );
+                uasserted( 14040 , str::stream() << "can split " << min << " -> " << max << " on " << *it );
            }
        }
diff --git a/s/d_chunk_manager.h b/s/d_chunk_manager.h
index 9fb95e7..fd5974e 100644
--- a/s/d_chunk_manager.h
+++ b/s/d_chunk_manager.h
@@ -25,6 +25,8 @@

 namespace mongo {

+    class ClientCursor;
+
     /**
      * Controls the boundaries of all the chunks for a given collection that live in this shard.
      *
@@ -102,6 +104,14 @@ namespace mongo {
         bool belongsToMe( const BSONObj& obj ) const;

         /**
+         * Checks whether a document belongs to this shard.
+         *
+         * @param cc cursor pointing at a document containing the sharding keys (and, optionally, other attributes)
+         * @return true if this shard holds the document
+         */
+        bool belongsToMe( ClientCursor* cc ) const;
+
+        /**
          * Given a chunk's min key (or empty doc), gets the boundary of the chunk following that one (the first).
          *
          * @param lookupKey is the min key for a previously obtained chunk or the empty document
@@ -119,6 +129,13 @@ namespace mongo {
         string toString() const;

     private:
+
+        /**
+         * same as belongsToMe but the key has to be the shard key
+         */
+        bool _belongsToMe( const BSONObj& key ) const;
+
+
         // highest ShardChunkVersion for which this ShardChunkManager's information is accurate
         ShardChunkVersion _version;

diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index 1ab7c64..9d4fd74 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -29,7 +29,7 @@
 #include "../db/commands.h"
 #include "../db/jsobj.h"
 #include "../db/dbmessage.h"
-#include "../db/query.h"
+#include "../db/ops/query.h"

 #include "../client/connpool.h"

@@ -56,11 +56,11 @@ namespace mongo {
        DbMessage d(m);
        const char *ns = d.getns();
        string errmsg;
-        if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ) {
+        if ( shardVersionOk( ns , errmsg ) ) {
            return false;
        }

-        log(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
+        LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;

        if ( doesOpGetAResponse( op ) ) {
            assert( dbresponse );
@@ -87,6 +87,8 @@ namespace mongo {
            dbresponse->responseTo = m.header()->id;
            return true;
        }
+
+        uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );

        OID writebackID;
        writebackID.init();
@@ -95,8 +97,8 @@ namespace mongo {
        const OID& clientID = ShardedConnectionInfo::get(false)->getID();
        massert( 10422 , "write with bad shard config and no server id!"
, clientID.isSet() ); - log(1) << "got write with an old config - writing back ns: " << ns << endl; - if ( logLevel ) log(1) << debugString( m ) << endl; + LOG(1) << "got write with an old config - writing back ns: " << ns << endl; + if ( logLevel ) LOG(1) << m.toString() << endl; BSONObjBuilder b; b.appendBool( "writeBack" , true ); @@ -107,7 +109,7 @@ namespace mongo { b.appendTimestamp( "version" , shardingState.getVersion( ns ) ); b.appendTimestamp( "yourVersion" , ShardedConnectionInfo::get( true )->getVersion( ns ) ); b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) ); - log(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl; + LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl; writeBackManager.queueWriteBack( clientID.str() , b.obj() ); return true; diff --git a/s/d_logic.h b/s/d_logic.h index 718836c..d96f937 100644 --- a/s/d_logic.h +++ b/s/d_logic.h @@ -173,6 +173,7 @@ namespace mongo { static ShardedConnectionInfo* get( bool create ); static void reset(); + static void addHook(); bool inForceVersionOkMode() const { return _forceVersionOk; @@ -219,7 +220,7 @@ namespace mongo { /** * @return true if the current threads shard version is ok, or not in sharded version */ - bool shardVersionOk( const string& ns , bool write , string& errmsg ); + bool shardVersionOk( const string& ns , string& errmsg ); /** * @return true if we took care of the message and nothing else should be done diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp index 6f2607d..e24a02d 100644 --- a/s/d_migrate.cpp +++ b/s/d_migrate.cpp @@ -29,13 +29,12 @@ #include "../db/commands.h" #include "../db/jsobj.h" -#include "../db/dbmessage.h" -#include "../db/query.h" #include "../db/cmdline.h" #include "../db/queryoptimizer.h" #include "../db/btree.h" #include "../db/repl_block.h" #include "../db/dur.h" +#include "../db/clientcursor.h" #include "../client/connpool.h" #include "../client/distlock.h" @@ -43,6 +42,7 @@ #include "../util/queue.h" #include "../util/unittest.h" #include "../util/processinfo.h" +#include "../util/ramlog.h" #include "shard.h" #include "d_logic.h" @@ -53,6 +53,8 @@ using namespace std; namespace mongo { + Tee* migrateLog = new RamLog( "migrate" ); + class MoveTimingHelper { public: MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max , int total ) @@ -72,7 +74,7 @@ namespace mongo { configServer.logChange( (string)"moveChunk." 
+ _where , _ns, _b.obj() );
            }
            catch ( const std::exception& e ) {
-                log( LL_WARNING ) << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << endl;
+                warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << migrateLog;
            }
        }

@@ -88,7 +90,7 @@ namespace mongo {
            if ( op )
                op->setMessage( s.c_str() );
            else
-                log( LL_WARNING ) << "op is null in MoveTimingHelper::done" << endl;
+                warning() << "op is null in MoveTimingHelper::done" << migrateLog;

            _b.appendNumber( s , _t.millis() );
            _t.reset();
@@ -98,7 +100,7 @@ namespace mongo {
            ProcessInfo pi;
            ss << " v:" << pi.getVirtualMemorySize() << " r:" << pi.getResidentSize();

-            log() << ss.str() << endl;
+            log() << ss.str() << migrateLog;
#endif
        }

@@ -130,7 +132,7 @@ namespace mongo {
    };

    struct OldDataCleanup {
-        static AtomicUInt _numThreads; // how many threads are doing async cleanusp
+        static AtomicUInt _numThreads; // how many threads are doing async cleanup

        string ns;
        BSONObj min;
@@ -151,12 +153,31 @@ namespace mongo {
            _numThreads--;
        }

+        string toString() const {
+            return str::stream() << ns << " from " << min << " -> " << max;
+        }
+
        void doRemove() {
            ShardForceVersionOkModeBlock sf;
-            writelock lk(ns);
-            RemoveSaver rs("moveChunk",ns,"post-cleanup");
-            long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
-            log() << "moveChunk deleted: " << num << endl;
+            {
+                writelock lk(ns);
+                RemoveSaver rs("moveChunk",ns,"post-cleanup");
+                long long numDeleted = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
+                log() << "moveChunk deleted: " << numDeleted << migrateLog;
+            }
+
+            ReplTime lastOpApplied = cc().getLastOp();
+
+            Timer t;
+            for ( int i=0; i<3600; i++ ) {
+                if ( opReplicatedEnough( lastOpApplied , ( getSlaveCount() / 2 ) + 1 ) ) {
+                    LOG(t.seconds() < 30 ? 1 : 0) << "moveChunk repl sync took " << t.seconds() << " seconds" << migrateLog;
+                    return;
+                }
+                sleepsecs(1);
+            }
+
+            warning() << "moveChunk repl sync timed out after " << t.seconds() << " seconds" << migrateLog;
        }

    };
@@ -172,7 +193,7 @@ namespace mongo {
        }

        virtual void help( stringstream& help ) const {
-            help << "internal - should not be called directly" << endl;
+            help << "internal - should not be called directly";
        }
        virtual bool slaveOk() const { return false; }
        virtual bool adminOnly() const { return true; }
@@ -190,14 +211,14 @@ namespace mongo {

    class MigrateFromStatus {
    public:
-        MigrateFromStatus() : _m("MigrateFromStatus") , _workLock( "MigrateFromStatus::WorkLock" ) {
+        MigrateFromStatus() : _m("MigrateFromStatus") , _workLock("MigrateFromStatus::workLock") {
            _active = false;
            _inCriticalSection = false;
            _memoryUsed = 0;
        }

        void start( string ns , const BSONObj& min , const BSONObj& max ) {
-            scoped_lock lk( _workLock );
+            scoped_lock ll(_workLock);
            scoped_lock l(_m); // reads and writes _active

            assert( ! _active );
@@ -255,7 +276,7 @@ namespace mongo {
                ide = obj["_id"];

            if ( ide.eoo() ) {
-                log( LL_WARNING ) << "logOpForSharding got mod with no _id, ignoring obj: " << obj << endl;
+                warning() << "logOpForSharding got mod with no _id, ignoring obj: " << obj << migrateLog;
                return;
            }

@@ -284,7 +305,7 @@ namespace mongo {

            case 'u':
                if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
-                    log( LL_WARNING ) << "logOpForSharding couldn't find: " << ide << " even though should have" << endl;
+                    warning() << "logOpForSharding couldn't find: " << ide << " even though should have" << migrateLog;
                    return;
                }
                break;
@@ -378,13 +399,13 @@ namespace mongo {
                return false;
            }

-            scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
-                                         shared_ptr<Cursor>( new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
-                                         _ns ) );
+            auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
+                                       shared_ptr<Cursor>( BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
+                                       _ns ) );

            // use the average object size to estimate how many objects a full chunk would carry
            // do that while traversing the chunk's range using the sharding index, below
-            // there's a fair amout of slack before we determine a chunk is too large because object sizes will vary
+            // there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
            unsigned long long maxRecsWhenFull;
            long long avgRecSize;
            const long long totalRecs = d->stats.nrecords;
@@ -412,7 +433,8 @@ namespace mongo {

                // we can afford to yield here because any change to the base data that we might miss is already being
                // queued and will be migrated in the 'transferMods' stage
-                if ( ! cc->yieldSometimes() ) {
+                if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+                    cc.release();
                    break;
                }

@@ -422,19 +444,19 @@ namespace mongo {
            }

            if ( isLargeChunk ) {
-                warning() << "can't move chunk of size (aprox) " << recCount * avgRecSize
+                warning() << "can't move chunk of size (approximately) " << recCount * avgRecSize
                          << " because maximum size allowed to move is " << maxChunkSize
                          << " ns: " << _ns << " " << _min << " -> " << _max
-                          << endl;
+                          << migrateLog;
                result.appendBool( "chunkTooBig" , true );
-                result.appendNumber( "chunkSize" , (long long)(recCount * avgRecSize) );
+                result.appendNumber( "estimatedChunkSize" , (long long)(recCount * avgRecSize) );
                errmsg = "chunk too big to move";
                return false;
            }

            {
                scoped_spinlock lk( _trackerLocks );
-                log() << "moveChunk number of documents: " << _cloneLocs.size() << endl;
+                log() << "moveChunk number of documents: " << _cloneLocs.size() << migrateLog;
            }
            return true;
        }
@@ -515,18 +537,19 @@ namespace mongo {
        void setInCriticalSection( bool b ) { scoped_lock l(_m); _inCriticalSection = b; }

        bool isActive() const { return _getActive(); }
-
-
+
        void doRemove( OldDataCleanup& cleanup ) {
+            int it = 0;
            while ( true ) {
+                if ( it > 20 && it % 10 == 0 ) log() << "doRemove iteration " << it << " for: " << cleanup << endl;
+                it++;
                {
-                    scoped_lock lk( _workLock );
+                    scoped_lock ll(_workLock);
                    if ( ! _active ) {
                        cleanup.doRemove();
                        return;
                    }
                }
-                sleepmillis( 100 );
+                sleepmillis( 1000 );
            }
        }

@@ -572,7 +595,10 @@ namespace mongo {

    void _cleanupOldData( OldDataCleanup cleanup ) {
        Client::initThread( cleanUpThreadName );
-        log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
+        if (!noauth) {
+            cc().getAuthenticationInfo()->authorize("local", internalSecurity.user);
+        }
+        log() << " (start) waiting to cleanup " << cleanup << " # cursors:" << cleanup.initial.size() << migrateLog;

        int loops = 0;
        Timer t;
@@ -595,14 +621,14 @@ namespace mongo {
            cleanup.initial = left;

            if ( ( loops++ % 200 ) == 0 ) {
-                log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
+                log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << migrateLog;

                stringstream ss;
                for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
                    CursorId id = *i;
                    ss << id << " ";
                }
-                log() << " cursors: " << ss.str() << endl;
+                log() << " cursors: " << ss.str() << migrateLog;
            }
        }

@@ -616,10 +642,10 @@ namespace mongo {
            _cleanupOldData( cleanup );
        }
        catch ( std::exception& e ) {
-            log() << " error cleaning old data:" << e.what() << endl;
+            log() << " error cleaning old data:" << e.what() << migrateLog;
        }
        catch ( ... ) {
-            log() << " unknown error cleaning old data" << endl;
+            log() << " unknown error cleaning old data" << migrateLog;
        }
    }

@@ -635,7 +661,7 @@ namespace mongo {
    public:
        TransferModsCommand() : ChunkCommandHelper( "_transferMods" ) {}

-        bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+        bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
            return migrateFromStatus.transferMods( errmsg, result );
        }
    } transferModsCommand;
@@ -645,7 +671,7 @@ namespace mongo {
    public:
        InitialCloneCommand() : ChunkCommandHelper( "_migrateClone" ) {}

-        bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+        bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
            return migrateFromStatus.clone( errmsg, result );
        }
    } initialCloneCommand;
@@ -661,7 +687,7 @@ namespace mongo {
    public:
        MoveChunkCommand() : Command( "moveChunk" ) {}
        virtual void help( stringstream& help ) const {
-            help << "should not be calling this directly" << endl;
+            help << "should not be calling this directly";
        }

        virtual bool slaveOk() const { return false; }
@@ -669,7 +695,7 @@ namespace mongo {

        virtual LockType locktype() const { return NONE; }

-        bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+        bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
            // 1. parse options
            // 2. make sure my view is complete and lock
            // 3. start migrate
@@ -745,15 +771,24 @@ namespace mongo {
            Shard fromShard( from );
            Shard toShard( to );

-            log() << "received moveChunk request: " << cmdObj << endl;
+            log() << "received moveChunk request: " << cmdObj << migrateLog;

            timing.done(1);

            // 2.
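            //    take the distributed lock that serializes chunk migrations on this
            //    collection. The shape below is acquire-or-abort: constructing the
            //    dist_lock_try attempts the lock, a LockException means the lock could
            //    not even be attempted against the config servers, and got() == false
            //    means some other migration currently holds it; the lock is dropped
            //    again when dlk goes out of scope. Roughly (names as in the code below):
            //
            //        dist_lock_try dlk;
            //        try {
            //            dlk = dist_lock_try( &lockSetup , "migrate-" + min.toString() );
            //        }
            //        catch ( LockException& e ) {
            //            return false;   // couldn't take the lock; fail the moveChunk
            //        }
            //        if ( ! dlk.got() )
            //            return false;   // another migration holds it; fail the moveChunk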
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC ) , ns );
-            dist_lock_try dlk( &lockSetup , (string)"migrate-" + min.toString() );
+            dist_lock_try dlk;
+
+            try{
+                dlk = dist_lock_try( &lockSetup , (string)"migrate-" + min.toString() );
+            }
+            catch( LockException& e ){
+                errmsg = str::stream() << "error locking distributed lock for migration " << "migrate-" << min.toString() << causedBy( e );
+                return false;
+            }
+
            if ( ! dlk.got() ) {
-                errmsg = "the collection's metadata lock is taken";
+                errmsg = str::stream() << "the collection metadata could not be locked with lock " << "migrate-" << min.toString();
                result.append( "who" , dlk.other() );
                return false;
            }

@@ -785,8 +820,8 @@ namespace mongo {
                result.append( "requestedMin" , min );
                result.append( "requestedMax" , max );

-                log( LL_WARNING ) << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
-                                  << " is now " << currMin << "->" << currMax << endl;
+                warning() << "aborted moveChunk because " << errmsg << ": " << min << "->" << max
+                          << " is now " << currMin << "->" << currMax << migrateLog;
                return false;
            }

@@ -795,8 +830,8 @@ namespace mongo {
                result.append( "from" , fromShard.getName() );
                result.append( "official" , myOldShard );

-                log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
-                                  << " and not at " << fromShard.getName() << endl;
+                warning() << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
+                          << " and not at " << fromShard.getName() << migrateLog;
                return false;
            }

@@ -805,8 +840,8 @@ namespace mongo {
                result.appendTimestamp( "officialVersion" , maxVersion );
                result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );

-                log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": official " << maxVersion
-                                  << " mine: " << shardingState.getVersion(ns) << endl;
+                warning() << "aborted moveChunk because " << errmsg << ": official " << maxVersion
+                          << " mine: " << shardingState.getVersion(ns) << migrateLog;
                return false;
            }

@@ -815,7 +850,7 @@ namespace mongo {
            ShardChunkVersion shardVersion;
            shardingState.trySetVersion( ns , shardVersion /* will return updated */ );

-            log() << "moveChunk request accepted at version " << shardVersion << endl;
+            log() << "moveChunk request accepted at version " << shardVersion << migrateLog;
        }

        timing.done(2);

@@ -860,10 +895,10 @@ namespace mongo {
                res = res.getOwned();
                conn.done();

-                log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << endl;
+                log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;

                if ( !
ok || res["state"].String() == "fail" ) { - log( LL_WARNING ) << "moveChunk error transfering data caused migration abort: " << res << endl; + warning() << "moveChunk error transferring data caused migration abort: " << res << migrateLog; errmsg = "data transfer error"; result.append( "cause" , res ); return false; @@ -880,7 +915,7 @@ namespace mongo { conn->runCommand( "admin" , BSON( "_recvChunkAbort" << 1 ) , res ); res = res.getOwned(); conn.done(); - error() << "aborting migrate because too much memory used res: " << res << endl; + error() << "aborting migrate because too much memory used res: " << res << migrateLog; errmsg = "aborting migrate because too much memory used"; result.appendBool( "split" , true ); return false; @@ -908,7 +943,7 @@ namespace mongo { shardingState.donateChunk( ns , min , max , myVersion ); } - log() << "moveChunk setting version to: " << myVersion << endl; + log() << "moveChunk setting version to: " << myVersion << migrateLog; // 5.b // we're under the collection lock here, too, so we can undo the chunk donation because no other state change @@ -929,15 +964,15 @@ namespace mongo { shardingState.undoDonateChunk( ns , min , max , currVersion ); } - log() << "movChunk migrate commit not accepted by TO-shard: " << res - << " resetting shard version to: " << currVersion << endl; + log() << "moveChunk migrate commit not accepted by TO-shard: " << res + << " resetting shard version to: " << currVersion << migrateLog; errmsg = "_recvChunkCommit failed!"; result.append( "cause" , res ); return false; } - log() << "moveChunk migrate commit accepted by TO-shard: " << res << endl; + log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog; } // 5.c @@ -1018,12 +1053,12 @@ namespace mongo { updates.append( op.obj() ); log() << "moveChunk updating self version to: " << nextVersion << " through " - << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << endl; + << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << migrateLog; } else { - log() << "moveChunk moved last chunk out for collection '" << ns << "'" << endl; + log() << "moveChunk moved last chunk out for collection '" << ns << "'" << migrateLog; } updates.done(); @@ -1044,7 +1079,7 @@ namespace mongo { preCond.done(); BSONObj cmd = cmdBuilder.obj(); - log(7) << "moveChunk update: " << cmd << endl; + LOG(7) << "moveChunk update: " << cmd << migrateLog; bool ok = false; BSONObj cmdResult; @@ -1054,6 +1089,7 @@ namespace mongo { conn.done(); } catch ( DBException& e ) { + warning() << e << migrateLog; ok = false; BSONObjBuilder b; e.getInfo().append( b ); @@ -1069,7 +1105,7 @@ namespace mongo { // if the commit did not make it, currently the only way to fix this state is to bounce the mongod so // that the old state (before migrating) be brought in - warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << endl; + warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << migrateLog; sleepsecs( 10 ); try { @@ -1081,13 +1117,13 @@ namespace mongo { ShardChunkVersion checkVersion = doc["lastmod"]; if ( checkVersion == nextVersion ) { - log() << "moveChunk commit confirmed" << endl; + log() << "moveChunk commit confirmed" << migrateLog; } else { error() << "moveChunk commit failed: version is at" - << checkVersion << " instead of " << nextVersion << endl; - error() << "TERMINATING" << endl; + << checkVersion << " instead of " << nextVersion << migrateLog; + error() << "TERMINATING" << migrateLog; 
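                        // the config servers now report a chunk version that contradicts
                        // what this shard believes it committed, so the in-memory sharding
                        // state can no longer be trusted; per the comment above, bouncing
                        // the mongod (which reloads clean metadata on startup) is currently
                        // the only way out of this state, hence the hard exit below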
dbexit( EXIT_SHARDING_ERROR ); } @@ -1095,8 +1131,8 @@ namespace mongo { } catch ( ... ) { - error() << "moveChunk failed to get confirmation of commit" << endl; - error() << "TERMINATING" << endl; + error() << "moveChunk failed to get confirmation of commit" << migrateLog; + error() << "TERMINATING" << migrateLog; dbexit( EXIT_SHARDING_ERROR ); } } @@ -1118,11 +1154,11 @@ namespace mongo { c.max = max.getOwned(); ClientCursor::find( ns , c.initial ); if ( c.initial.size() ) { - log() << "forking for cleaning up chunk data" << endl; + log() << "forking for cleaning up chunk data" << migrateLog; boost::thread t( boost::bind( &cleanupOldData , c ) ); } else { - log() << "doing delete inline" << endl; + log() << "doing delete inline" << migrateLog; // 7. c.doRemove(); } @@ -1156,7 +1192,7 @@ namespace mongo { class MigrateStatus { public: - + MigrateStatus() : m_active("MigrateStatus") { active = false; } void prepare() { @@ -1181,12 +1217,12 @@ namespace mongo { catch ( std::exception& e ) { state = FAIL; errmsg = e.what(); - log( LL_ERROR ) << "migrate failed: " << e.what() << endl; + error() << "migrate failed: " << e.what() << migrateLog; } catch ( ... ) { state = FAIL; errmsg = "UNKNOWN ERROR"; - log( LL_ERROR ) << "migrate failed with unknown exception" << endl; + error() << "migrate failed with unknown exception" << migrateLog; } setActive( false ); } @@ -1230,7 +1266,7 @@ namespace mongo { RemoveSaver rs( "moveChunk" , ns , "preCleanup" ); long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 ); if ( num ) - log( LL_WARNING ) << "moveChunkCmd deleted data already in chunk # objects: " << num << endl; + warning() << "moveChunkCmd deleted data already in chunk # objects: " << num << migrateLog; timing.done(2); } @@ -1246,7 +1282,7 @@ namespace mongo { state = FAIL; errmsg = "_migrateClone failed: "; errmsg += res.toString(); - error() << errmsg << endl; + error() << errmsg << migrateLog; conn.done(); return; } @@ -1274,7 +1310,7 @@ namespace mongo { } // if running on a replicated system, we'll need to flush the docs we cloned to the secondaries - ReplTime lastOpApplied; + ReplTime lastOpApplied = cc().getLastOp(); { // 4. do bulk of mods @@ -1285,7 +1321,7 @@ namespace mongo { state = FAIL; errmsg = "_transferMods failed: "; errmsg += res.toString(); - log( LL_ERROR ) << "_transferMods failed: " << res << endl; + error() << "_transferMods failed: " << res << migrateLog; conn.done(); return; } @@ -1306,7 +1342,7 @@ namespace mongo { break; if ( i > 100 ) { - warning() << "secondaries having hard time keeping up with migrate" << endl; + warning() << "secondaries having hard time keeping up with migrate" << migrateLog; } sleepmillis( 20 ); @@ -1314,7 +1350,7 @@ namespace mongo { if ( i == maxIterations ) { errmsg = "secondary can't keep up with migrate"; - error() << errmsg << endl; + error() << errmsg << migrateLog; conn.done(); state = FAIL; return; @@ -1324,15 +1360,25 @@ namespace mongo { timing.done(4); } + { + // pause to wait for replication + // this will prevent us from going into critical section until we're ready + Timer t; + while ( t.minutes() < 600 ) { + if ( flushPendingWrites( lastOpApplied ) ) + break; + sleepsecs(1); + } + } + { // 5. wait for commit - Timer timeWaitingForCommit; state = STEADY; while ( state == STEADY || state == COMMIT_START ) { BSONObj res; if ( ! 
conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
-                    log() << "_transferMods failed in STEADY state: " << res << endl;
+                    log() << "_transferMods failed in STEADY state: " << res << migrateLog;
                    errmsg = res.toString();
                    state = FAIL;
                    conn.done();
@@ -1342,20 +1388,21 @@ namespace mongo {
                if ( res["size"].number() > 0 && apply( res , &lastOpApplied ) )
                    continue;

-                if ( state == COMMIT_START && flushPendingWrites( lastOpApplied ) )
-                    break;
-
+                if ( state == ABORT ) {
+                    timing.note( "aborted" );
+                    return;
+                }
+
+                if ( state == COMMIT_START ) {
+                    if ( flushPendingWrites( lastOpApplied ) )
+                        break;
+                }
+
                sleepmillis( 10 );
            }

-            if ( state == ABORT ) {
-                timing.note( "aborted" );
-                return;
-            }
-
-            if ( timeWaitingForCommit.seconds() > 86400 ) {
-                state = FAIL;
-                errmsg = "timed out waiting for commit";
+            if ( state == FAIL ) {
+                errmsg = "timed out waiting for commit";
                return;
            }

@@ -1411,7 +1458,7 @@ namespace mongo {
                    BSONObj fullObj;
                    if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
                        if ( ! isInRange( fullObj , min , max ) ) {
-                            log() << "not applying out of range deletion: " << fullObj << endl;
+                            log() << "not applying out of range deletion: " << fullObj << migrateLog;

                            continue;
                        }
@@ -1451,18 +1498,22 @@ namespace mongo {

        bool flushPendingWrites( const ReplTime& lastOpApplied ) {
            if ( ! opReplicatedEnough( lastOpApplied ) ) {
-                warning() << "migrate commit attempt timed out contacting " << slaveCount
-                          << " slaves for '" << ns << "' " << min << " -> " << max << endl;
+                OpTime op( lastOpApplied );
+                OCCASIONALLY warning() << "migrate commit waiting for " << slaveCount
+                                       << " slaves for '" << ns << "' " << min << " -> " << max
+                                       << " waiting for: " << op
+                                       << migrateLog;
                return false;
            }
-            log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << endl;
+
+            log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << migrateLog;

            {
                readlock lk(ns);  // commitNow() currently requires it

                // if durability is on, force a write to journal
                if ( getDur().commitNow() ) {
-                    log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << endl;
+                    log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << migrateLog;
                }
            }

@@ -1488,13 +1539,16 @@ namespace mongo {
            if ( state != STEADY )
                return false;
            state = COMMIT_START;
-
-            for ( int i=0; i<86400; i++ ) {
+
+            Timer t;
+            // wait up to five minutes for the commit to succeed before giving up
+            while ( t.minutes() <= 5 ) {
                sleepmillis(1);
                if ( state == DONE )
                    return true;
            }
-            log() << "startCommit never finished!" << endl;
+            state = FAIL;
+            log() << "startCommit never finished!"
<< migrateLog; return false; } @@ -1529,6 +1583,10 @@ namespace mongo { void migrateThread() { Client::initThread( "migrateThread" ); + if (!noauth) { + ShardedConnectionInfo::addHook(); + cc().getAuthenticationInfo()->authorize("local", internalSecurity.user); + } migrateStatus.go(); cc().shutdown(); } @@ -1539,7 +1597,7 @@ namespace mongo { virtual LockType locktype() const { return WRITE; } // this is so don't have to do locking internally - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { if ( migrateStatus.getActive() ) { errmsg = "migrate already in progress"; @@ -1576,7 +1634,7 @@ namespace mongo { public: RecvChunkStatusCommand() : ChunkCommandHelper( "_recvChunkStatus" ) {} - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { migrateStatus.status( result ); return 1; } @@ -1587,7 +1645,7 @@ namespace mongo { public: RecvChunkCommitCommand() : ChunkCommandHelper( "_recvChunkCommit" ) {} - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { bool ok = migrateStatus.startCommit(); migrateStatus.status( result ); return ok; @@ -1599,7 +1657,7 @@ namespace mongo { public: RecvChunkAbortCommand() : ChunkCommandHelper( "_recvChunkAbort" ) {} - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { migrateStatus.abort(); migrateStatus.status( result ); return true; @@ -1621,7 +1679,7 @@ namespace mongo { assert( ! isInRange( BSON( "x" << 5 ) , min , max ) ); assert( ! 
isInRange( BSON( "x" << 6 ) , min , max ) ); - log(1) << "isInRangeTest passed" << endl; + LOG(1) << "isInRangeTest passed" << migrateLog; } } isInRangeTest; } diff --git a/s/d_split.cpp b/s/d_split.cpp index 3ed6e9b..cef6188 100644 --- a/s/d_split.cpp +++ b/s/d_split.cpp @@ -22,10 +22,10 @@ #include "../db/btree.h" #include "../db/commands.h" -#include "../db/dbmessage.h" #include "../db/jsobj.h" -#include "../db/query.h" +#include "../db/instance.h" #include "../db/queryoptimizer.h" +#include "../db/clientcursor.h" #include "../client/connpool.h" #include "../client/distlock.h" @@ -57,7 +57,7 @@ namespace mongo { "example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n" "NOTE: This command may take a while to run"; } - bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { + bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { const char *ns = jsobj.getStringField( "medianKey" ); BSONObj min = jsobj.getObjectField( "min" ); BSONObj max = jsobj.getObjectField( "max" ); @@ -74,22 +74,25 @@ namespace mongo { NamespaceDetails *d = nsdetails(ns); int idxNo = d->idxNo(*id); - // only yielding on firt half for now + // only yielding on first half for now // after this it should be in ram, so 2nd should be fast { - shared_ptr<Cursor> c( new BtreeCursor( d, idxNo, *id, min, max, false, 1 ) ); - scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) ); + shared_ptr<Cursor> c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) ); + auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) ); while ( c->ok() ) { num++; c->advance(); - if ( ! cc->yieldSometimes() ) + if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) { + cc.release(); break; + } } } num /= 2; - BtreeCursor c( d, idxNo, *id, min, max, false, 1 ); + auto_ptr<BtreeCursor> _c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) ); + BtreeCursor& c = *_c; for( ; num; c.advance(), --num ); ostringstream os; @@ -133,12 +136,12 @@ namespace mongo { help << "Internal command.\n"; } - bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { + bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { const char* ns = jsobj.getStringField( "checkShardingIndex" ); BSONObj keyPattern = jsobj.getObjectField( "keyPattern" ); - if ( keyPattern.nFields() == 1 && str::equals( "_id" , keyPattern.firstElement().fieldName() ) ) { + if ( keyPattern.nFields() == 1 && str::equals( "_id" , keyPattern.firstElementFieldName() ) ) { result.appendBool( "idskip" , true ); return true; } @@ -174,9 +177,14 @@ namespace mongo { return false; } - BtreeCursor * bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 ); + if( d->isMultikey( d->idxNo( *idx ) ) ) { + errmsg = "index is multikey, cannot use for sharding"; + return false; + } + + BtreeCursor * bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 ); shared_ptr<Cursor> c( bc ); - scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) ); + auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) ); if ( ! cc->ok() ) { // range is empty return true; @@ -217,8 +225,10 @@ namespace mongo { } cc->advance(); - if ( ! cc->yieldSometimes() ) + if ( ! 
cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+                    cc.release();
                    break;
+                }
            }

            return true;
@@ -243,7 +253,7 @@ namespace mongo {
                "NOTE: This command may take a while to run";
        }

-        bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+        bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {

            //
            // 1.a We'll parse the parameters in two steps. First, make sure we can use the split index to get
@@ -368,9 +378,9 @@ namespace mongo {
            long long currCount = 0;
            long long numChunks = 0;

-            BtreeCursor * bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+            BtreeCursor * bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
            shared_ptr<Cursor> c( bc );
-            scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+            auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
            if ( ! cc->ok() ) {
                errmsg = "can't open a cursor for splitting (desired range is possibly empty)";
                return false;
            }
@@ -414,13 +424,13 @@ namespace mongo {
                    break;
                }

-                if ( ! cc->yieldSometimes() ) {
+                if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
                    // we were near and got pushed to the end
                    // i think returning the splits we've already found is fine
                    // don't use the btree cursor pointer to access keys beyond this point but ok
                    // to use it to format the keys we've got already
-
+                    cc.release();
                    break;
                }
            }
@@ -433,7 +443,7 @@ namespace mongo {
                currCount = 0;
                log() << "splitVector doing another cycle because of force, keyCount now: " << keyCount << endl;

-                bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+                bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
                c.reset( bc );
                cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
            }
@@ -519,7 +529,7 @@ namespace mongo {
        virtual bool adminOnly() const { return true; }
        virtual LockType locktype() const { return NONE; }

-        bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+        bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {

            //
            // 1. check whether parameters passed to splitChunk are sound
@@ -531,31 +541,31 @@ namespace mongo {
                return false;
            }

-            BSONObj keyPattern = cmdObj["keyPattern"].Obj();
+            const BSONObj keyPattern = cmdObj["keyPattern"].Obj();
            if ( keyPattern.isEmpty() ) {
                errmsg = "need to specify the key pattern the collection is sharded over";
                return false;
            }

-            BSONObj min = cmdObj["min"].Obj();
+            const BSONObj min = cmdObj["min"].Obj();
            if ( min.isEmpty() ) {
-                errmsg = "neet to specify the min key for the chunk";
+                errmsg = "need to specify the min key for the chunk";
                return false;
            }

-            BSONObj max = cmdObj["max"].Obj();
+            const BSONObj max = cmdObj["max"].Obj();
            if ( max.isEmpty() ) {
-                errmsg = "neet to specify the max key for the chunk";
+                errmsg = "need to specify the max key for the chunk";
                return false;
            }

-            string from = cmdObj["from"].str();
+            const string from = cmdObj["from"].str();
            if ( from.empty() ) {
                errmsg = "need to specify the server to split the chunk at";
                return false;
            }

-            BSONObj splitKeysElem = cmdObj["splitKeys"].Obj();
+            const BSONObj splitKeysElem = cmdObj["splitKeys"].Obj();
            if ( splitKeysElem.isEmpty() ) {
                errmsg = "need to provide the split points to chunk over";
                return false;
@@ -566,7 +576,7 @@ namespace mongo {
                splitKeys.push_back( it.next().Obj().getOwned() );
            }

-            BSONElement shardId = cmdObj["shardId"];
+            const BSONElement shardId = cmdObj["shardId"];
            if ( shardId.eoo() ) {
                errmsg = "need to provide shardId";
                return false;
@@ -594,7 +604,16 @@ namespace mongo {
            //
            DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC) , ns );
-            dist_lock_try dlk( &lockSetup, string("split-") + min.toString() );
+            dist_lock_try dlk;
+
+            try{
+                dlk = dist_lock_try( &lockSetup, string("split-") + min.toString() );
+            }
+            catch( LockException& e ){
+                errmsg = str::stream() << "Error locking distributed lock for split." << causedBy( e );
+                return false;
+            }
+
            if ( !
dlk.got() ) { errmsg = "the collection's metadata lock is taken"; result.append( "who" , dlk.other() ); @@ -672,7 +691,7 @@ namespace mongo { BSONObjBuilder logDetail; origChunk.appendShortVersion( "before" , logDetail ); - log(1) << "before split on " << origChunk << endl; + LOG(1) << "before split on " << origChunk << endl; vector<ChunkInfo> newChunks; ShardChunkVersion myVersion = maxVersion; @@ -695,7 +714,7 @@ namespace mongo { op.appendBool( "b" , true ); op.append( "ns" , ShardNS::chunk ); - // add the modified (new) chunk infomation as the update object + // add the modified (new) chunk information as the update object BSONObjBuilder n( op.subobjStart( "o" ) ); n.append( "_id" , Chunk::genID( ns , startKey ) ); n.appendTimestamp( "lastmod" , myVersion ); @@ -781,13 +800,28 @@ namespace mongo { for ( int i=0; i < newChunksSize; i++ ) { BSONObjBuilder chunkDetail; chunkDetail.appendElements( beforeDetailObj ); - chunkDetail.append( "number", i ); + chunkDetail.append( "number", i+1 ); chunkDetail.append( "of" , newChunksSize ); newChunks[i].appendShortVersion( "chunk" , chunkDetail ); configServer.logChange( "multi-split" , ns , chunkDetail.obj() ); } } + if (newChunks.size() == 2){ + // If one of the chunks has only one object in it we should move it + static const BSONObj fields = BSON("_id" << 1 ); + DBDirectClient conn; + for (int i=1; i >= 0 ; i--){ // high chunk more likely to have only one obj + ChunkInfo chunk = newChunks[i]; + Query q = Query().minKey(chunk.min).maxKey(chunk.max); + scoped_ptr<DBClientCursor> c (conn.query(ns, q, /*limit*/-2, 0, &fields)); + if (c && c->itcount() == 1) { + result.append("shouldMigrate", BSON("min" << chunk.min << "max" << chunk.max)); + break; + } + } + } + return true; } } cmdSplitChunk; diff --git a/s/d_state.cpp b/s/d_state.cpp index e10400f..f43865b 100644 --- a/s/d_state.cpp +++ b/s/d_state.cpp @@ -28,8 +28,7 @@ #include "../db/commands.h" #include "../db/jsobj.h" -#include "../db/dbmessage.h" -#include "../db/query.h" +#include "../db/db.h" #include "../client/connpool.h" @@ -289,7 +288,7 @@ namespace mongo { ShardedConnectionInfo* ShardedConnectionInfo::get( bool create ) { ShardedConnectionInfo* info = _tl.get(); if ( ! info && create ) { - log(1) << "entering shard mode for connection" << endl; + LOG(1) << "entering shard mode for connection" << endl; info = new ShardedConnectionInfo(); _tl.reset( info ); } @@ -314,6 +313,15 @@ namespace mongo { _versions[ns] = version; } + void ShardedConnectionInfo::addHook() { + static bool done = false; + if (!done) { + LOG(1) << "adding sharding hook" << endl; + pool.addHook(new ShardingConnectionHook(false)); + done = true; + } + } + void ShardedConnectionInfo::setID( const OID& id ) { _id = id; } @@ -372,7 +380,7 @@ namespace mongo { virtual bool slaveOk() const { return true; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { ShardedConnectionInfo::reset(); return true; } @@ -412,6 +420,7 @@ namespace mongo { } if ( locked ) { + ShardedConnectionInfo::addHook(); shardingState.enable( configdb ); configServer.init( configdb ); return true; @@ -443,7 +452,7 @@ namespace mongo { return true; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { // Steps // 1. 
check basic config
@@ -476,7 +485,7 @@ namespace mongo {
            string ns = cmdObj["setShardVersion"].valuestrsafe();
            if ( ns.size() == 0 ) {
-                errmsg = "need to speciy namespace";
+                errmsg = "need to specify namespace";
                return false;
            }

@@ -493,7 +502,7 @@ namespace mongo {
            if ( globalVersion > 0 && version > 0 ) {
                // this means there is no reset going on either side
-                // so its safe to make some assuptions
+                // so it's safe to make some assumptions

                if ( version == globalVersion ) {
                    // mongos and mongod agree!
@@ -507,6 +516,10 @@ namespace mongo {
            }

            // step 4
+
+            // this is because of a weird segfault I saw and I can't see why this should ever be set
+            massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );
+
            dblock setShardVersionLock; // TODO: can we get rid of this??

            if ( oldVersion > 0 && globalVersion == 0 ) {
@@ -538,7 +551,7 @@ namespace mongo {
            }

            if ( version < oldVersion ) {
-                errmsg = "you already have a newer version of collection '" + ns + "'";
+                errmsg = "this connection already had a newer version of collection '" + ns + "'";
                result.append( "ns" , ns );
                result.appendTimestamp( "newVersion" , version );
                result.appendTimestamp( "globalVersion" , globalVersion );
@@ -551,10 +564,11 @@ namespace mongo {
                    sleepmillis(2);
                    OCCASIONALLY log() << "waiting till out of critical section" << endl;
                }
-                errmsg = "going to older version for global for collection '" + ns + "'";
+                errmsg = "shard global version for collection '" + ns + "' is higher than the version being set";
                result.append( "ns" , ns );
                result.appendTimestamp( "version" , version );
                result.appendTimestamp( "globalVersion" , globalVersion );
+                result.appendBool( "reloadConfig" , true );
                return false;
            }

@@ -572,7 +586,7 @@ namespace mongo {
            ShardChunkVersion currVersion = version;
            if ( !
shardingState.trySetVersion( ns , currVersion ) ) { - errmsg = str::stream() << "client version differs from config's for colleciton '" << ns << "'"; + errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'"; result.append( "ns" , ns ); result.appendTimestamp( "version" , version ); result.appendTimestamp( "globalVersion" , currVersion ); @@ -599,10 +613,10 @@ namespace mongo { virtual LockType locktype() const { return NONE; } - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { string ns = cmdObj["getShardVersion"].valuestrsafe(); if ( ns.size() == 0 ) { - errmsg = "need to speciy fully namespace"; + errmsg = "need to specify full namespace"; return false; } @@ -611,6 +625,7 @@ namespace mongo { result.appendTimestamp( "global" , shardingState.getVersion(ns) ); ShardedConnectionInfo* info = ShardedConnectionInfo::get( false ); + result.appendBool( "inShardedMode" , info != 0 ); if ( info ) result.appendTimestamp( "mine" , info->getVersion(ns) ); else @@ -627,7 +642,7 @@ namespace mongo { virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock - bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) { + bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { shardingState.appendInfo( result ); return true; } @@ -638,7 +653,7 @@ namespace mongo { * @ return true if not in sharded mode or if version for this client is ok */ - bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) { + bool shardVersionOk( const string& ns , string& errmsg ) { if ( ! shardingState.enabled() ) return true; @@ -668,7 +683,7 @@ namespace mongo { if ( version == 0 && clientVersion > 0 ) { stringstream ss; - ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion; + ss << "collection was dropped or this shard no longer valid version: " << version << " clientVersion: " << clientVersion; errmsg = ss.str(); return false; } @@ -697,4 +712,7 @@ namespace mongo { return false; } + void ShardingConnectionHook::onHandedOut( DBClientBase * conn ) { + // no-op for mongod + } } diff --git a/s/d_writeback.cpp b/s/d_writeback.cpp index 401e0aa..01c0c14 100644 --- a/s/d_writeback.cpp +++ b/s/d_writeback.cpp @@ -20,6 +20,7 @@ #include "../db/commands.h" #include "../util/queue.h" +#include "../util/net/listen.h" #include "d_writeback.h" @@ -39,28 +40,82 @@ namespace mongo { } void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ) { - getWritebackQueue( remote )->push( o ); + getWritebackQueue( remote )->queue.push( o ); } - BlockingQueue<BSONObj>* WriteBackManager::getWritebackQueue( const string& remote ) { + shared_ptr<WriteBackManager::QueueInfo> WriteBackManager::getWritebackQueue( const string& remote ) { scoped_lock lk ( _writebackQueueLock ); - BlockingQueue<BSONObj>*& q = _writebackQueues[remote]; + shared_ptr<QueueInfo>& q = _writebackQueues[remote]; if ( ! 
q )
-            q = new BlockingQueue<BSONObj>();
+            q.reset( new QueueInfo() );
+        q->lastCall = Listener::getElapsedTimeMillis();
        return q;
    }

    bool WriteBackManager::queuesEmpty() const {
        scoped_lock lk( _writebackQueueLock );
        for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
-            const BlockingQueue<BSONObj>* queue = it->second;
-            if (! queue->empty() ) {
+            const shared_ptr<QueueInfo> queue = it->second;
+            if (! queue->queue.empty() ) {
                return false;
            }
        }
        return true;
    }

+    void WriteBackManager::appendStats( BSONObjBuilder& b ) const {
+        BSONObjBuilder sub;
+        long long totalQueued = 0;
+        long long now = Listener::getElapsedTimeMillis();
+        {
+            scoped_lock lk( _writebackQueueLock );
+            for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+                const shared_ptr<QueueInfo> queue = it->second;
+
+                BSONObjBuilder t( sub.subobjStart( it->first ) );
+                t.appendNumber( "n" , queue->queue.size() );
+                t.appendNumber( "minutesSinceLastCall" , ( now - queue->lastCall ) / ( 1000 * 60 ) );
+                t.done();
+
+                totalQueued += queue->queue.size();
+            }
+        }
+
+        b.appendBool( "hasOpsQueued" , totalQueued > 0 );
+        b.appendNumber( "totalOpsQueued" , totalQueued );
+        b.append( "queues" , sub.obj() );
+    }
+
+    bool WriteBackManager::cleanupOldQueues() {
+        long long now = Listener::getElapsedTimeMillis();
+
+        scoped_lock lk( _writebackQueueLock );
+        for ( WriteBackQueuesMap::iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+            const shared_ptr<QueueInfo> queue = it->second;
+            long long sinceMinutes = ( now - queue->lastCall ) / ( 1000 * 60 );
+
+            if ( sinceMinutes < 60 ) // minutes of inactivity.
+                continue;
+
+            log() << "deleting queue from: " << it->first
+                  << " of size: " << queue->queue.size()
+                  << " after " << sinceMinutes << " minutes of inactivity"
+                  << " (normal if any mongos has restarted)"
+                  << endl;
+
+            _writebackQueues.erase( it );
+            return true;
+        }
+        return false;
+    }
+
+    void WriteBackManager::Cleaner::taskDoWork() {
+        for ( int i=0; i<1000; i++ ) {
+            if ( ! writeBackManager.cleanupOldQueues() )
+                break;
+        }
+    }
+
    // ---------- admin commands ----------

    // Note, this command will block until there is something to WriteBack
@@ -74,7 +129,7 @@ namespace mongo {
        void help(stringstream& h) const { h<<"internal"; }

-        bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+        bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
            BSONElement e = cmdObj.firstElement();
            if ( e.type() != jstOID ) {
@@ -88,8 +143,8 @@ namespace mongo {
            // the command issuer is blocked awaiting a response
            // we want to return at least every 5 minutes so sockets don't time out
            BSONObj z;
-            if ( writeBackManager.getWritebackQueue(id.str())->blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
-                log(1) << "WriteBackCommand got : " << z << endl;
+            if ( writeBackManager.getWritebackQueue(id.str())->queue.blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
+                LOG(1) << "WriteBackCommand got : " << z << endl;
                result.append( "data" , z );
            }
            else {
@@ -110,14 +165,15 @@ namespace mongo {

        void help(stringstream& help) const {
            help << "Returns whether there are operations in the writeback queue at the time the command was called. "
-                 << "This is an internal comand";
+                 << "This is an internal command";
        }

-        bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
-            result.appendBool( "hasOpsQueued" , ! writeBackManager.queuesEmpty() );
+        bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+            writeBackManager.appendStats( result );
            return true;
        }

    } writeBacksQueuedCommand;
+

} // namespace mongo
diff --git a/s/d_writeback.h b/s/d_writeback.h
index 32f5b1c..d3f36a1 100644
--- a/s/d_writeback.h
+++ b/s/d_writeback.h
@@ -21,6 +21,7 @@
 #include "../pch.h"

 #include "../util/queue.h"
+#include "../util/background.h"

 namespace mongo {

@@ -33,6 +34,21 @@ namespace mongo {
      */
     class WriteBackManager {
     public:
+
+        class QueueInfo : boost::noncopyable {
+        public:
+            QueueInfo(){}
+
+            BlockingQueue<BSONObj> queue;
+            long long lastCall; // this is elapsed millis since startup
+        };
+
+        // a map from mongos's serverIDs to queues of "rejected" operations
+        // an operation is rejected if it targets data that does not live on this shard anymore
+        typedef map<string,shared_ptr<QueueInfo> > WriteBackQueuesMap;
+
+
+    public:
         WriteBackManager();
         ~WriteBackManager();

@@ -51,22 +67,37 @@ namespace mongo {
          *
          * Gets access to server 'remote's queue, which is synchronized.
          */
-        BlockingQueue<BSONObj>* getWritebackQueue( const string& remote );
+        shared_ptr<QueueInfo> getWritebackQueue( const string& remote );

         /*
          * @return true if there is no operation queued for write back
          */
         bool queuesEmpty() const;

+        /**
+         * appends a number of statistics
+         */
+        void appendStats( BSONObjBuilder& b ) const;
+
+        /**
+         * removes queues that have been idle
+         * @return if something was removed
+         */
+        bool cleanupOldQueues();
+
     private:
-        // a map from mongos's serverIDs to queues of "rejected" operations
-        // an operation is rejected if it targets data that does not live on this shard anymore
-        typedef map< string , BlockingQueue<BSONObj>* > WriteBackQueuesMap;
-
+        // '_writebackQueueLock' protects only the map itself, since each queue is synchronized.
        mutable mongo::mutex _writebackQueueLock;
        WriteBackQueuesMap _writebackQueues;
+
+        class Cleaner : public PeriodicTask {
+        public:
+            virtual string taskName() const { return "WriteBackManager::cleaner"; }
+            virtual void taskDoWork();
+        };
+        Cleaner _cleaner;

    };

    // TODO collect global state in a central place and init during startup
diff --git a/s/dbgrid.vcxproj b/s/dbgrid.vcxproj
index 61a8458..33d6221 100644
--- a/s/dbgrid.vcxproj
+++ b/s/dbgrid.vcxproj
@@ -1,587 +1,616 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
-  <ItemGroup Label="ProjectConfigurations">
-    <ProjectConfiguration Include="Debug|Win32">
-      <Configuration>Debug</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Debug|x64">
-      <Configuration>Debug</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|Win32">
-      <Configuration>Release</Configuration>
-      <Platform>Win32</Platform>
-    </ProjectConfiguration>
-    <ProjectConfiguration Include="Release|x64">
-      <Configuration>Release</Configuration>
-      <Platform>x64</Platform>
-    </ProjectConfiguration>
-  </ItemGroup>
-  <PropertyGroup Label="Globals">
-    <ProjectName>mongos</ProjectName>
-    <ProjectGuid>{E03717ED-69B4-4D21-BC55-DF6690B585C6}</ProjectGuid>
-    <RootNamespace>dbgrid</RootNamespace>
-    <Keyword>Win32Proj</Keyword>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <CharacterSet>Unicode</CharacterSet>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <CharacterSet>Unicode</CharacterSet>
-    <WholeProgramOptimization>true</WholeProgramOptimization>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
-    <ConfigurationType>Application</ConfigurationType>
-    <CharacterSet>Unicode</CharacterSet>
-  </PropertyGroup>
-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
-  <ImportGroup Label="ExtensionSettings">
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
-  </ImportGroup>
-  <PropertyGroup Label="UserMacros" />
-  <PropertyGroup>
-    <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
-    <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
-    <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
-    <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
-    <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
-    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
-    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
-    <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
-    <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
-    <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
-    <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
-    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
-    <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
-    <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
-    <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
-    <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
-    <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
-    <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
-    <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
- <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
- <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <MinimalRebuild>No</MinimalRebuild>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <TargetMachine>MachineX86</TargetMachine>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <ClCompile>
- <Optimization>Disabled</Optimization>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
- <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <TargetMachine>MachineX86</TargetMachine>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <ClCompile>
- <Optimization>MaxSpeed</Optimization>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\js\src;..\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_UNICODE;UNICODE;SUPPORT_UCP;SUPPORT_UTF8;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <PrecompiledHeader>Use</PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
- <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- <MinimalRebuild>No</MinimalRebuild>
- <MultiProcessorCompilation>true</MultiProcessorCompilation>
- <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
- </ClCompile>
- <Link>
- <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <SubSystem>Console</SubSystem>
- <OptimizeReferences>true</OptimizeReferences>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- </Link>
- </ItemDefinitionGroup>
- <ItemGroup>
- <ClCompile Include="..\bson\oid.cpp" />
- <ClCompile Include="..\client\dbclientcursor.cpp" />
- <ClCompile Include="..\client\dbclient_rs.cpp" />
- <ClCompile Include="..\client\distlock.cpp" />
- <ClCompile Include="..\db\dbwebserver.cpp" />
- <ClCompile Include="..\db\security_key.cpp" />
- <ClCompile Include="..\scripting\bench.cpp" />
- <ClCompile Include="..\util\alignedbuilder.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
- <ClCompile Include="..\util\concurrency\task.cpp" />
- <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
- <ClCompile Include="..\util\concurrency\vars.cpp" />
- <ClCompile Include="..\util\log.cpp" />
- <ClCompile Include="..\util\miniwebserver.cpp" />
- <ClCompile Include="..\util\processinfo.cpp" />
- <ClCompile Include="..\util\signal_handlers.cpp" />
- <ClCompile Include="..\util\stringutils.cpp" />
- <ClCompile Include="..\util\text.cpp" />
- <ClCompile Include="..\util\version.cpp" />
- <ClCompile Include="balance.cpp" />
- <ClCompile Include="balancer_policy.cpp" />
- <ClCompile Include="chunk.cpp" />
- <ClCompile Include="client.cpp" />
- <ClCompile Include="commands_admin.cpp" />
- <ClCompile Include="commands_public.cpp" />
- <ClCompile Include="config.cpp" />
- <ClCompile Include="config_migrate.cpp" />
- <ClCompile Include="cursors.cpp" />
- <ClCompile Include="..\pch.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\db\queryutil.cpp" />
- <ClCompile Include="grid.cpp" />
- <ClCompile Include="request.cpp" />
- <ClCompile Include="shardconnection.cpp" />
- <ClCompile Include="shard_version.cpp" />
- <ClCompile Include="s_only.cpp" />
- <ClCompile Include="server.cpp" />
- <ClCompile Include="shard.cpp" />
- <ClCompile Include="shardkey.cpp" />
- <ClCompile Include="stats.cpp" />
- <ClCompile Include="strategy.cpp" />
- <ClCompile Include="strategy_shard.cpp" />
- <ClCompile Include="strategy_single.cpp" />
- <ClCompile Include="..\scripting\utils.cpp" />
- <ClCompile Include="..\client\connpool.cpp" />
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\client\dbclient.cpp" />
- <ClCompile Include="..\client\model.cpp" />
- <ClCompile Include="..\util\assert_util.cpp" />
- <ClCompile Include="..\util\background.cpp" />
- <ClCompile Include="..\util\base64.cpp" />
- <ClCompile Include="..\db\cmdline.cpp" />
- <ClCompile Include="..\db\commands.cpp" />
- <ClCompile Include="..\db\stats\counters.cpp" />
- <ClCompile Include="..\util\debug_util.cpp" />
- <ClCompile Include="..\scripting\engine.cpp" />
- <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
- <ClCompile Include="..\db\indexkey.cpp" />
- <ClCompile Include="..\db\jsobj.cpp" />
- <ClCompile Include="..\db\json.cpp" />
- <ClCompile Include="..\db\lasterror.cpp" />
- <ClCompile Include="..\db\matcher.cpp" />
- <ClCompile Include="..\util\md5.c">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\md5main.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\util\message.cpp" />
- <ClCompile Include="..\util\message_server_port.cpp" />
- <ClCompile Include="..\util\mmap.cpp" />
- <ClCompile Include="..\util\mmap_win.cpp" />
- <ClCompile Include="..\shell\mongo_vstudio.cpp">
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- </PrecompiledHeader>
- <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- </PrecompiledHeader>
- </ClCompile>
- <ClCompile Include="..\db\nonce.cpp" />
- <ClCompile Include="..\client\parallel.cpp" />
- <ClCompile Include="..\util\processinfo_win32.cpp" />
- <ClCompile Include="..\util\sock.cpp" />
- <ClCompile Include="..\client\syncclusterconnection.cpp" />
- <ClCompile Include="..\util\util.cpp" />
- <ClCompile Include="writeback_listener.cpp" />
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="..\util\processinfo.h" />
- <ClInclude Include="..\util\signal_handlers.h" />
- <ClInclude Include="..\util\version.h" />
- <ClInclude Include="balancer_policy.h" />
- <ClInclude Include="grid.h" />
- <ClInclude Include="gridconfig.h" />
- <ClInclude Include="griddatabase.h" />
- <ClInclude Include="shard.h" />
- <ClInclude Include="strategy.h" />
- <ClInclude Include="..\util\background.h" />
- <ClInclude Include="..\db\commands.h" />
- <ClInclude Include="..\db\dbmessage.h" />
- <ClInclude Include="..\util\goodies.h" />
- <ClInclude Include="..\db\jsobj.h" />
- <ClInclude Include="..\db\json.h" />
- <ClInclude Include="..\pch.h" />
- <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
- <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
- <ClInclude Include="..\pcre-7.4\config.h" />
- <ClInclude Include="..\pcre-7.4\pcre.h" />
- <ClInclude Include="..\client\connpool.h" />
- <ClInclude Include="..\client\dbclient.h" />
- <ClInclude Include="..\client\model.h" />
- </ItemGroup>
- <ItemGroup>
- <Library Include="..\..\js\js32d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js32r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64d.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- <Library Include="..\..\js\js64r.lib">
- <FileType>Document</FileType>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
- <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
- </Library>
- </ItemGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
+<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectName>mongos</ProjectName> + <ProjectGuid>{E03717ED-69B4-4D21-BC55-DF6690B585C6}</ProjectGuid> + <RootNamespace>dbgrid</RootNamespace> + <Keyword>Win32Proj</Keyword> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <CharacterSet>Unicode</CharacterSet> + <WholeProgramOptimization>true</WholeProgramOptimization> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <CharacterSet>Unicode</CharacterSet> + <WholeProgramOptimization>true</WholeProgramOptimization> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <CharacterSet>Unicode</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <CharacterSet>Unicode</CharacterSet> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup> + <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion> + <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir> + <OutDir 
Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir> + <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir> + <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir> + <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental> + <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental> + <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir> + <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir> + <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir> + <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir> + <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental> + <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental> + <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet> + <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet> + <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" /> + <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" /> + <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" /> + <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" /> + <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet> + <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet> + <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" /> + <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" /> + <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" /> + <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" /> + <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath> + <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath> + <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath> + <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <Optimization>Disabled</Optimization> + <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> + <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions> + <MinimalRebuild>No</MinimalRebuild> + <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks> + <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary> + <PrecompiledHeader>Use</PrecompiledHeader> + <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile> + <WarningLevel>Level3</WarningLevel> + <DebugInformationFormat>EditAndContinue</DebugInformationFormat> + 
<DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings> + <MultiProcessorCompilation>true</MultiProcessorCompilation> + </ClCompile> + <Link> + <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies> + <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories> + <GenerateDebugInformation>true</GenerateDebugInformation> + <SubSystem>Console</SubSystem> + <TargetMachine>MachineX86</TargetMachine> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <Optimization>Disabled</Optimization> + <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> + <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions> + <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks> + <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary> + <PrecompiledHeader>Use</PrecompiledHeader> + <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile> + <WarningLevel>Level3</WarningLevel> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings> + <MinimalRebuild>No</MinimalRebuild> + <MultiProcessorCompilation>true</MultiProcessorCompilation> + </ClCompile> + <Link> + <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies> + <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories> + <GenerateDebugInformation>true</GenerateDebugInformation> + <SubSystem>Console</SubSystem> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <Optimization>MaxSpeed</Optimization> + <IntrinsicFunctions>true</IntrinsicFunctions> + <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> + <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <FunctionLevelLinking>true</FunctionLevelLinking> + <PrecompiledHeader>Use</PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings> + <MinimalRebuild>No</MinimalRebuild> + <MultiProcessorCompilation>true</MultiProcessorCompilation> + <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile> + </ClCompile> + <Link> + <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies> + <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories> + <GenerateDebugInformation>true</GenerateDebugInformation> + <SubSystem>Console</SubSystem> + <OptimizeReferences>true</OptimizeReferences> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <TargetMachine>MachineX86</TargetMachine> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + 
<ClCompile> + <Optimization>MaxSpeed</Optimization> + <IntrinsicFunctions>true</IntrinsicFunctions> + <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> + <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <FunctionLevelLinking>true</FunctionLevelLinking> + <PrecompiledHeader>Use</PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings> + <MinimalRebuild>No</MinimalRebuild> + <MultiProcessorCompilation>true</MultiProcessorCompilation> + <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile> + </ClCompile> + <Link> + <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies> + <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories> + <GenerateDebugInformation>true</GenerateDebugInformation> + <SubSystem>Console</SubSystem> + <OptimizeReferences>true</OptimizeReferences> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\bson\oid.cpp" /> + <ClCompile Include="..\client\dbclientcursor.cpp" /> + <ClCompile Include="..\client\dbclient_rs.cpp" /> + <ClCompile Include="..\client\distlock.cpp" /> + <ClCompile Include="..\db\common.cpp" /> + <ClCompile Include="..\db\dbmessage.cpp" /> + <ClCompile Include="..\db\dbcommands_generic.cpp" /> + <ClCompile Include="..\db\dbwebserver.cpp" /> + <ClCompile Include="..\db\querypattern.cpp"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\db\security_common.cpp" /> + <ClCompile Include="..\scripting\bench.cpp" /> + <ClCompile Include="..\util\alignedbuilder.cpp"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\util\concurrency\spin_lock.cpp" /> + <ClCompile Include="..\util\concurrency\task.cpp" /> + <ClCompile Include="..\util\concurrency\thread_pool.cpp" /> + <ClCompile Include="..\util\concurrency\vars.cpp" /> + <ClCompile Include="..\util\log.cpp" /> + <ClCompile Include="..\util\net\miniwebserver.cpp" /> + <ClCompile Include="..\util\net\listen.cpp" /> + <ClCompile Include="..\util\processinfo.cpp" /> + <ClCompile Include="..\util\ramlog.cpp" /> + <ClCompile Include="..\util\signal_handlers.cpp" /> + <ClCompile Include="..\util\stringutils.cpp" /> + <ClCompile Include="..\util\text.cpp" /> + <ClCompile Include="..\util\version.cpp" /> + <ClCompile Include="balance.cpp" /> + <ClCompile Include="balancer_policy.cpp" /> + <ClCompile Include="chunk.cpp" /> + <ClCompile Include="client.cpp" /> + <ClCompile Include="commands_admin.cpp" /> + <ClCompile Include="commands_public.cpp" /> + <ClCompile Include="config.cpp" /> 
+ <ClCompile Include="config_migrate.cpp" /> + <ClCompile Include="cursors.cpp" /> + <ClCompile Include="..\pch.cpp"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\db\queryutil.cpp" /> + <ClCompile Include="grid.cpp" /> + <ClCompile Include="mr_shard.cpp" /> + <ClCompile Include="request.cpp" /> + <ClCompile Include="security.cpp" /> + <ClCompile Include="shardconnection.cpp" /> + <ClCompile Include="shard_version.cpp" /> + <ClCompile Include="s_only.cpp" /> + <ClCompile Include="server.cpp" /> + <ClCompile Include="shard.cpp" /> + <ClCompile Include="shardkey.cpp" /> + <ClCompile Include="stats.cpp" /> + <ClCompile Include="strategy.cpp" /> + <ClCompile Include="strategy_shard.cpp" /> + <ClCompile Include="strategy_single.cpp" /> + <ClCompile Include="..\scripting\utils.cpp" /> + <ClCompile Include="..\client\connpool.cpp" /> + <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader 
Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile 
Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\client\dbclient.cpp" /> + <ClCompile Include="..\client\model.cpp" /> + <ClCompile Include="..\util\assert_util.cpp" /> + <ClCompile Include="..\util\background.cpp" /> + <ClCompile Include="..\util\base64.cpp" /> + <ClCompile Include="..\db\cmdline.cpp" /> + <ClCompile Include="..\db\commands.cpp" /> + <ClCompile Include="..\db\stats\counters.cpp" /> + <ClCompile Include="..\util\debug_util.cpp" /> + <ClCompile Include="..\scripting\engine.cpp" /> + <ClCompile Include="..\scripting\engine_spidermonkey.cpp" /> + <ClCompile Include="..\db\indexkey.cpp" /> + <ClCompile Include="..\db\jsobj.cpp" /> + <ClCompile Include="..\db\json.cpp" /> + <ClCompile Include="..\db\lasterror.cpp" /> + <ClCompile Include="..\db\matcher.cpp" /> + <ClCompile Include="..\util\md5.c"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + 
<PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\util\md5main.cpp"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\util\net\message.cpp" /> + <ClCompile Include="..\util\net\message_port.cpp" /> + <ClCompile Include="..\util\net\message_server_port.cpp" /> + <ClCompile Include="..\util\mmap.cpp" /> + <ClCompile Include="..\util\mmap_win.cpp" /> + <ClCompile Include="..\shell\mongo_vstudio.cpp"> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + <ClCompile Include="..\db\nonce.cpp" /> + <ClCompile Include="..\client\parallel.cpp" /> + <ClCompile Include="..\util\processinfo_win32.cpp" /> + <ClCompile Include="..\util\net\sock.cpp" /> + <ClCompile Include="..\client\syncclusterconnection.cpp" /> + <ClCompile Include="..\util\util.cpp" /> + <ClCompile Include="writeback_listener.cpp" /> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\util\processinfo.h" /> + <ClInclude Include="..\util\signal_handlers.h" /> + <ClInclude Include="..\util\version.h" /> + <ClInclude Include="balance.h" /> + <ClInclude Include="balancer_policy.h" /> + <ClInclude Include="chunk.h" /> + <ClInclude Include="client.h" /> + <ClInclude Include="config.h" /> + <ClInclude Include="cursors.h" /> + <ClInclude Include="d_chunk_manager.h" /> + <ClInclude Include="d_logic.h" /> + <ClInclude Include="d_writeback.h" /> + <ClInclude Include="grid.h" /> + <ClInclude Include="gridconfig.h" /> + <ClInclude Include="griddatabase.h" /> + <ClInclude Include="request.h" /> + <ClInclude Include="server.h" /> + <ClInclude Include="shard.h" /> + <ClInclude Include="shardkey.h" /> + <ClInclude Include="shard_version.h" /> + <ClInclude Include="stats.h" /> + <ClInclude Include="strategy.h" /> + <ClInclude Include="..\util\background.h" /> + <ClInclude Include="..\db\commands.h" /> + <ClInclude Include="..\db\dbmessage.h" /> + <ClInclude Include="..\util\goodies.h" /> + <ClInclude Include="..\db\jsobj.h" /> + <ClInclude Include="..\db\json.h" /> + <ClInclude Include="..\pch.h" /> + <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" /> + <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" /> + <ClInclude Include="..\third_party\pcre-7.4\config.h" /> + <ClInclude Include="..\third_party\pcre-7.4\pcre.h" /> + <ClInclude Include="..\client\connpool.h" /> + <ClInclude Include="..\client\dbclient.h" /> + <ClInclude Include="..\client\model.h" /> + <ClInclude Include="util.h" /> + <ClInclude Include="writeback_listener.h" /> + </ItemGroup> + <ItemGroup> + <Library Include="..\..\js\js32d.lib"> + <FileType>Document</FileType> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + </Library> + 
<Library Include="..\..\js\js32r.lib"> + <FileType>Document</FileType> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </Library> + <Library Include="..\..\js\js64d.lib"> + <FileType>Document</FileType> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + </Library> + <Library Include="..\..\js\js64r.lib"> + <FileType>Document</FileType> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + </Library> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> </Project>
\ No newline at end of file diff --git a/s/dbgrid.vcxproj.filters b/s/dbgrid.vcxproj.filters index b87a1f2..e417e95 100755 --- a/s/dbgrid.vcxproj.filters +++ b/s/dbgrid.vcxproj.filters @@ -83,78 +83,6 @@ <ClCompile Include="..\client\connpool.cpp">
<Filter>Header Files\Header Shared</Filter>
</ClCompile>
- <ClCompile Include="..\pcre-7.4\pcrecpp.cc">
- <Filter>libs_etc</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_chartables.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_compile.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_config.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_dfa_exec.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_exec.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_fullinfo.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_get.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_globals.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_info.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_maketables.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_newline.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ord2utf8.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_refcount.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_scanner.cc">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_stringpiece.cc">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_study.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_tables.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_try_flipped.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_ucp_searchfuncs.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_valid_utf8.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_version.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcre_xclass.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
- <ClCompile Include="..\pcre-7.4\pcreposix.c">
- <Filter>libs_etc\pcre</Filter>
- </ClCompile>
<ClCompile Include="..\client\dbclient.cpp">
<Filter>client</Filter>
</ClCompile>
@@ -317,9 +245,27 @@ <ClCompile Include="client.cpp">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\db\security_key.cpp">
+ <ClCompile Include="..\db\dbcommands_generic.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\querypattern.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\ramlog.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="mr_shard.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\common.cpp">
<Filter>Shared Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\db\security_common.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="security.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="gridconfig.h">
@@ -391,6 +337,51 @@ <ClInclude Include="..\util\signal_handlers.h">
<Filter>Shared Source Files</Filter>
</ClInclude>
+ <ClInclude Include="writeback_listener.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="balance.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="chunk.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="client.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="config.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="cursors.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_chunk_manager.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_logic.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_writeback.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="request.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="server.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shard_version.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shardkey.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="stats.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="util.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
</ItemGroup>
<ItemGroup>
<Library Include="..\..\js\js32d.lib" />
@@ -119,12 +119,14 @@ namespace mongo {
    }

    bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ) {
-        // name can be NULL, so privide a dummy one here to avoid testing it elsewhere
+        // name can be NULL, so provide a dummy one here to avoid testing it elsewhere
        string nameInternal;
        if ( ! name ) {
            name = &nameInternal;
        }
+
+        ReplicaSetMonitorPtr rsMonitor;
+
        // Check whether the host (or set) exists and run several sanity checks on this request.
        // There are two set of sanity checks: making sure adding this particular shard is consistent
        // with the replica set state (if it exists) and making sure this shards databases can be
@@ -140,7 +142,7 @@ namespace mongo {
                errMsg = "can't use sync cluster as a shard. for replica set, have to use <setname>/<server1>,<server2>,...";
                return false;
            }
-
+
            BSONObj resIsMongos;
            bool ok = newShardConn->runCommand( "admin" , BSON( "isdbgrid" << 1 ) , resIsMongos );
@@ -171,6 +173,13 @@ namespace mongo {
                newShardConn.done();
                return false;
            }
+            if ( !commandSetName.empty() && setName.empty() ) {
+                ostringstream ss;
+                ss << "host did not return a set name, is the replica set still initializing? " << resIsMaster;
+                errMsg = ss.str();
+                newShardConn.done();
+                return false;
+            }

            // if the shard is part of replica set, make sure it is the right one
@@ -197,6 +206,12 @@ namespace mongo {
                    hostSet.insert( piter.next().String() ); // host:port
                }
            }
+            if ( resIsMaster["arbiters"].isABSONObj() ) {
+                BSONObjIterator piter( resIsMaster["arbiters"].Obj() );
+                while ( piter.more() ) {
+                    hostSet.insert( piter.next().String() ); // host:port
+                }
+            }

            vector<HostAndPort> hosts = servers.getServers();
            for ( size_t i = 0 ; i < hosts.size() ; i++ ) {
@@ -213,7 +228,8 @@ namespace mongo {
            }
            if ( ! foundAll ) {
                ostringstream ss;
-                ss << "host " << offendingHost << " does not belong to replica set as a non-passive member" << setName;;
+                ss << "in seed list " << servers.toString() << ", host " << offendingHost
+                   << " does not belong to replica set " << setName;
                errMsg = ss.str();
                newShardConn.done();
                return false;
            }
@@ -250,6 +266,9 @@ namespace mongo {
                }
            }

+            if ( newShardConn->type() == ConnectionString::SET )
+                rsMonitor = ReplicaSetMonitor::get( setName );
+
            newShardConn.done();
        }
        catch ( DBException& e ) {
@@ -281,7 +300,7 @@ namespace mongo {
        // build the ConfigDB shard document
        BSONObjBuilder b;
        b.append( "_id" , *name );
-        b.append( "host" , servers.toString() );
+        b.append( "host" , rsMonitor ? rsMonitor->getServerAddress() : servers.toString() );
        if ( maxSize > 0 ) {
            b.append( ShardFields::maxSize.name() , maxSize );
        }
@@ -375,10 +394,7 @@ namespace mongo {
        // check the 'stopped' marker maker
        // if present, it is a simple bool
        BSONElement stoppedElem = balancerDoc["stopped"];
-        if ( ! stoppedElem.eoo() && stoppedElem.isBoolean() ) {
-            return stoppedElem.boolean();
-        }
-        return false;
+        return stoppedElem.trueValue();
    }

    bool Grid::_inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now ) {
@@ -392,24 +408,32 @@ namespace mongo {
        // check if both 'start' and 'stop' are present
        if ( ! windowElem.isABSONObj() ) {
-            log(1) << "'activeWindow' format is { start: \"hh:mm\" , stop: ... }" << balancerDoc << endl;
+            warning() << "'activeWindow' format is { start: \"hh:mm\" , stop: ... }" << balancerDoc << endl;
            return true;
        }
        BSONObj intervalDoc = windowElem.Obj();
        const string start = intervalDoc["start"].str();
        const string stop = intervalDoc["stop"].str();
        if ( start.empty() || stop.empty() ) {
-            log(1) << "must specify both start and end of balancing window: " << intervalDoc << endl;
+            warning() << "must specify both start and end of balancing window: " << intervalDoc << endl;
            return true;
        }

        // check that both 'start' and 'stop' are valid time-of-day
        boost::posix_time::ptime startTime, stopTime;
        if ( ! toPointInTime( start , &startTime ) || ! toPointInTime( stop , &stopTime ) ) {
-            log(1) << "cannot parse active window (use hh:mm 24hs format): " << intervalDoc << endl;
+            warning() << "cannot parse active window (use hh:mm 24hs format): " << intervalDoc << endl;
            return true;
        }

+        if ( logLevel ) {
+            stringstream ss;
+            ss << " now: " << now
+               << " startTime: " << startTime
+               << " stopTime: " << stopTime;
+            log() << "_inBalancingWindow: " << ss.str() << endl;
+        }
+
        // allow balancing if during the activeWindow
        // note that a window may be open during the night
        if ( stopTime > startTime ) {
@@ -453,6 +477,10 @@ namespace mongo {
    class BalancingWindowUnitTest : public UnitTest {
    public:
        void run() {
+
+            if ( ! cmdLine.isMongos() )
+                return;
+
            // T0 < T1 < now < T2 < T3 and Error
            const string T0 = "9:00";
            const string T1 = "11:00";
@@ -485,7 +513,7 @@ namespace mongo {
            assert( Grid::_inBalancingWindow( w8 , now ) );
            assert( Grid::_inBalancingWindow( w9 , now ) );

-            log(1) << "BalancingWidowObjTest passed" << endl;
+            LOG(1) << "BalancingWindowObjTest passed" << endl;
        }
    } BalancingWindowObjTest;
diff --git a/s/mr_shard.cpp b/s/mr_shard.cpp
new file mode 100644
index 0000000..93f49d1
--- /dev/null
+++ b/s/mr_shard.cpp
@@ -0,0 +1,312 @@
+// mr_shard.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "pch.h" +#include "../util/net/message.h" +#include "../db/dbmessage.h" +#include "../scripting/engine.h" + +#include "mr_shard.h" + +namespace mongo { + + namespace mr_shard { + + AtomicUInt Config::JOB_NUMBER; + + JSFunction::JSFunction( string type , const BSONElement& e ) { + _type = type; + _code = e._asCode(); + + if ( e.type() == CodeWScope ) + _wantedScope = e.codeWScopeObject(); + } + + void JSFunction::init( State * state ) { + _scope = state->scope(); + assert( _scope ); + _scope->init( &_wantedScope ); + + _func = _scope->createFunction( _code.c_str() ); + uassert( 14836 , str::stream() << "couldn't compile code for: " << _type , _func ); + + // install in JS scope so that it can be called in JS mode + _scope->setFunction(_type.c_str(), _code.c_str()); + } + + /** + * Applies the finalize function to a tuple obj (key, val) + * Returns tuple obj {_id: key, value: newval} + */ + BSONObj JSFinalizer::finalize( const BSONObj& o ) { + Scope * s = _func.scope(); + + Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" ); + s->invokeSafe( _func.func() , &o, 0 ); + + // don't want to use o.objsize() to size b + // since there are many cases where the point of finalize + // is converting many fields to 1 + BSONObjBuilder b; + b.append( o.firstElement() ); + s->append( b , "value" , "return" ); + return b.obj(); + } + + void JSReducer::init( State * state ) { + _func.init( state ); + } + + /** + * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value} + */ + BSONObj JSReducer::reduce( const BSONList& tuples ) { + if (tuples.size() <= 1) + return tuples[0]; + BSONObj key; + int endSizeEstimate = 16; + _reduce( tuples , key , endSizeEstimate ); + + BSONObjBuilder b(endSizeEstimate); + b.appendAs( key.firstElement() , "0" ); + _func.scope()->append( b , "1" , "return" ); + return b.obj(); + } + + /** + * Reduces a list of tuple object (key, value) to a single tuple {_id: key, value: val} + * Also applies a finalizer method if present. + */ + BSONObj JSReducer::finalReduce( const BSONList& tuples , Finalizer * finalizer ) { + + BSONObj res; + BSONObj key; + + if (tuples.size() == 1) { + // 1 obj, just use it + key = tuples[0]; + BSONObjBuilder b(key.objsize()); + BSONObjIterator it(key); + b.appendAs( it.next() , "_id" ); + b.appendAs( it.next() , "value" ); + res = b.obj(); + } + else { + // need to reduce + int endSizeEstimate = 16; + _reduce( tuples , key , endSizeEstimate ); + BSONObjBuilder b(endSizeEstimate); + b.appendAs( key.firstElement() , "_id" ); + _func.scope()->append( b , "value" , "return" ); + res = b.obj(); + } + + if ( finalizer ) { + res = finalizer->finalize( res ); + } + + return res; + } + + /** + * actually applies a reduce, to a list of tuples (key, value). 
+ * After the call, tuples will hold a single tuple {"0": key, "1": value} + */ + void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) { + int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128; + + // need to build the reduce args: ( key, [values] ) + BSONObjBuilder reduceArgs( sizeEstimate ); + boost::scoped_ptr<BSONArrayBuilder> valueBuilder; + int sizeSoFar = 0; + unsigned n = 0; + for ( ; n<tuples.size(); n++ ) { + BSONObjIterator j(tuples[n]); + BSONElement keyE = j.next(); + if ( n == 0 ) { + reduceArgs.append( keyE ); + key = keyE.wrap(); + sizeSoFar = 5 + keyE.size(); + valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) )); + } + + BSONElement ee = j.next(); + + uassert( 14837 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) ); + + if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) { + assert( n > 1 ); // if not, inf. loop + break; + } + + valueBuilder->append( ee ); + sizeSoFar += ee.size(); + } + assert(valueBuilder); + valueBuilder->done(); + BSONObj args = reduceArgs.obj(); + + Scope * s = _func.scope(); + + s->invokeSafe( _func.func() , &args, 0 ); + ++numReduces; + + if ( s->type( "return" ) == Array ) { + uasserted( 14838 , "reduce -> multiple not supported yet"); + return; + } + + endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() ); + + if ( n == tuples.size() ) + return; + + // the input list was too large, add the rest of elmts to new tuples and reduce again + // note: would be better to use loop instead of recursion to avoid stack overflow + BSONList x; + for ( ; n < tuples.size(); n++ ) { + x.push_back( tuples[n] ); + } + BSONObjBuilder temp( endSizeEstimate ); + temp.append( key.firstElement() ); + s->append( temp , "1" , "return" ); + x.push_back( temp.obj() ); + _reduce( x , key , endSizeEstimate ); + } + + Config::Config( const string& _dbname , const BSONObj& cmdObj ) { + + dbname = _dbname; + ns = dbname + "." + cmdObj.firstElement().valuestr(); + + verbose = cmdObj["verbose"].trueValue(); + jsMode = cmdObj["jsMode"].trueValue(); + + jsMaxKeys = 500000; + reduceTriggerRatio = 2.0; + maxInMemSize = 5 * 1024 * 1024; + + uassert( 14841 , "outType is no longer a valid option" , cmdObj["outType"].eoo() ); + + if ( cmdObj["out"].type() == String ) { + finalShort = cmdObj["out"].String(); + outType = REPLACE; + } + else if ( cmdObj["out"].type() == Object ) { + BSONObj o = cmdObj["out"].embeddedObject(); + + BSONElement e = o.firstElement(); + string t = e.fieldName(); + + if ( t == "normal" || t == "replace" ) { + outType = REPLACE; + finalShort = e.String(); + } + else if ( t == "merge" ) { + outType = MERGE; + finalShort = e.String(); + } + else if ( t == "reduce" ) { + outType = REDUCE; + finalShort = e.String(); + } + else if ( t == "inline" ) { + outType = INMEMORY; + } + else { + uasserted( 14839 , str::stream() << "unknown out specifier [" << t << "]" ); + } + + if (o.hasElement("db")) { + outDB = o["db"].String(); + } + } + else { + uasserted( 14840 , "'out' has to be a string or an object" ); + } + + if ( outType != INMEMORY ) { // setup names + tempLong = str::stream() << (outDB.empty() ? dbname : outDB) << ".tmp.mr." << cmdObj.firstElement().String() << "_" << finalShort << "_" << JOB_NUMBER++; + + incLong = tempLong + "_inc"; + + finalLong = str::stream() << (outDB.empty() ? dbname : outDB) << "." 
<< finalShort; + } + + { + // scope and code + + if ( cmdObj["scope"].type() == Object ) + scopeSetup = cmdObj["scope"].embeddedObjectUserCheck(); + + reducer.reset( new JSReducer( cmdObj["reduce"] ) ); + if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() ) + finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) ); + + } + + { + // query options + if ( cmdObj["limit"].isNumber() ) + limit = cmdObj["limit"].numberLong(); + else + limit = 0; + } + } + + State::State( const Config& c ) : _config( c ) { + _onDisk = _config.outType != Config::INMEMORY; + } + + State::~State() { + if ( _onDisk ) { + try { +// _db.dropCollection( _config.tempLong ); +// _db.dropCollection( _config.incLong ); + } + catch ( std::exception& e ) { + error() << "couldn't cleanup after map reduce: " << e.what() << endl; + } + } + + if (_scope) { + // cleanup js objects + ScriptingFunction cleanup = _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;"); + _scope->invoke(cleanup, 0, 0, 0, true); + } + } + + /** + * Initialize the mapreduce operation, creating the inc collection + */ + void State::init() { + // setup js + _scope.reset(globalScriptEngine->getPooledScope( _config.dbname ).release() ); +// _scope->localConnect( _config.dbname.c_str() ); + _scope->externalSetup(); + + if ( ! _config.scopeSetup.isEmpty() ) + _scope->init( &_config.scopeSetup ); + + _config.reducer->init( this ); + if ( _config.finalizer ) + _config.finalizer->init( this ); + _scope->setBoolean("_doFinal", _config.finalizer); + } + } +} + diff --git a/s/mr_shard.h b/s/mr_shard.h new file mode 100644 index 0000000..9603ba9 --- /dev/null +++ b/s/mr_shard.h @@ -0,0 +1,232 @@ +// mr_shard.h + +/** + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#pragma once + +#include "pch.h" + +namespace mongo { + + namespace mr_shard { + + typedef vector<BSONObj> BSONList; + + class State; + + // ------------ function interfaces ----------- + + class Finalizer : boost::noncopyable { + public: + virtual ~Finalizer() {} + virtual void init( State * state ) = 0; + + /** + * this takes a tuple and returns a tuple + */ + virtual BSONObj finalize( const BSONObj& tuple ) = 0; + }; + + class Reducer : boost::noncopyable { + public: + Reducer() : numReduces(0) {} + virtual ~Reducer() {} + virtual void init( State * state ) = 0; + + virtual BSONObj reduce( const BSONList& tuples ) = 0; + /** this means its a final reduce, even if there is no finalizer */ + virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0; + + long long numReduces; + }; + + // ------------ js function implementations ----------- + + /** + * used as a holder for Scope and ScriptingFunction + * visitor like pattern as Scope is gotten from first access + */ + class JSFunction : boost::noncopyable { + public: + /** + * @param type (map|reduce|finalize) + */ + JSFunction( string type , const BSONElement& e ); + virtual ~JSFunction() {} + + virtual void init( State * state ); + + Scope * scope() const { return _scope; } + ScriptingFunction func() const { return _func; } + + private: + string _type; + string _code; // actual javascript code + BSONObj _wantedScope; // this is for CodeWScope + + Scope * _scope; // this is not owned by us, and might be shared + ScriptingFunction _func; + }; + + class JSReducer : public Reducer { + public: + JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {} + virtual void init( State * state ); + + virtual BSONObj reduce( const BSONList& tuples ); + virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ); + + private: + + /** + * result in "return" + * @param key OUT + * @param endSizeEstimate OUT + */ + void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate ); + + JSFunction _func; + }; + + class JSFinalizer : public Finalizer { + public: + JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {} + virtual BSONObj finalize( const BSONObj& o ); + virtual void init( State * state ) { _func.init( state ); } + private: + JSFunction _func; + + }; + + // ----------------- + + /** + * holds map/reduce config information + */ + class Config { + public: + Config( const string& _dbname , const BSONObj& cmdObj ); + + string dbname; + string ns; + + // options + bool verbose; + bool jsMode; + + // query options + + BSONObj filter; + BSONObj sort; + long long limit; + + // functions + scoped_ptr<Reducer> reducer; + scoped_ptr<Finalizer> finalizer; + + BSONObj mapParams; + BSONObj scopeSetup; + + // output tables + string incLong; + string tempLong; + + string finalShort; + string finalLong; + + string outDB; + + // max number of keys allowed in JS map before switching mode + long jsMaxKeys; + // ratio of duplicates vs unique keys before reduce is triggered in js mode + float reduceTriggerRatio; + // maximum size of map before it gets dumped to disk + long maxInMemSize; + + enum { REPLACE , // atomically replace the collection + MERGE , // merge keys, override dups + REDUCE , // merge keys, reduce dups + INMEMORY // only store in memory, limited in size + } outType; + + static AtomicUInt JOB_NUMBER; + }; // end MRsetup + + /** + * stores information about intermediate map reduce state + * controls flow of data from map->reduce->finalize->output + */ + class State 
{
+        public:
+            State( const Config& c );
+            ~State();
+
+            void init();
+
+            // ---- prep -----
+            bool sourceExists();
+
+            long long incomingDocuments();
+
+            // ---- map stage ----
+
+            /**
+             * stages an object in in-memory storage
+             */
+            void emit( const BSONObj& a );
+
+            /**
+             * if size is big, run a reduce
+             * if its still big, dump to temp collection
+             */
+            void checkSize();
+
+            /**
+             * run reduce on _temp
+             */
+            void reduceInMemory();
+
+            // ------ reduce stage -----------
+
+            void prepTempCollection();
+
+            void finalReduce( BSONList& values );
+
+            void finalReduce( CurOp * op , ProgressMeterHolder& pm );
+
+            // ------ simple accessors -----
+
+            /** State maintains ownership, do not use past State lifetime */
+            Scope* scope() { return _scope.get(); }
+
+            const Config& config() { return _config; }
+
+            const bool isOnDisk() { return _onDisk; }
+
+            long long numReduces() const { return _config.reducer->numReduces; }
+
+            const Config& _config;
+
+        protected:
+
+            scoped_ptr<Scope> _scope;
+            bool _onDisk; // if the end result of this map reduce is disk or not
+        };
+
+    } // end mr namespace
+}
+
+
diff --git a/s/request.cpp b/s/request.cpp
index 32c17cc..36488cb 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -43,7 +43,12 @@ namespace mongo {
        _clientInfo = ClientInfo::get();
        _clientInfo->newRequest( p );
+    }
+    void Request::checkAuth() const {
+        char cl[256];
+        nsToDatabase(getns(), cl);
+        uassert(15845, "unauthorized", _clientInfo->getAuthenticationInfo()->isAuthorized(cl));
    }

    void Request::init() {
@@ -60,13 +65,21 @@
        uassert( 13644 , "can't use 'local' database through mongos" , ! str::startsWith( getns() , "local." ) );

-        _config = grid.getDBConfig( getns() );
-        if ( reload )
-            uassert( 10192 , "db config reload failed!" , _config->reload() );
+        const string nsStr (getns()); // use in functions taking string rather than char*
+
+        _config = grid.getDBConfig( nsStr );
+        if ( reload ) {
+            if ( _config->isSharded( nsStr ) )
+                _config->getChunkManager( nsStr , true );
+            else
+                _config->reload();
+        }

-        if ( _config->isSharded( getns() ) ) {
-            _chunkManager = _config->getChunkManager( getns() , reload );
-            uassert( 10193 , (string)"no shard info for: " + getns() , _chunkManager );
+        if ( _config->isSharded( nsStr ) ) {
+            _chunkManager = _config->getChunkManager( nsStr , reload );
+            // TODO: All of these uasserts are no longer necessary, getChunkManager() throws when
+            // not returning the right value.
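
Request::checkAuth() above authorizes against the database portion of the namespace before an operation is forwarded. A minimal sketch of that "<db>.<collection>" split; this nsToDatabase is a simplified stand-in for the real helper, not its actual implementation:

    #include <cstring>
    #include <iostream>

    // Simplified stand-in for mongo's nsToDatabase(): copy everything before
    // the first '.' of "db.collection" into the caller-provided buffer.
    void nsToDatabase(const char* ns, char* database) {
        const char* dot = std::strchr(ns, '.');
        std::size_t len = dot ? (std::size_t)(dot - ns) : std::strlen(ns);
        std::memcpy(database, ns, len);
        database[len] = '\0';
    }

    int main() {
        char db[256];
        nsToDatabase("test.system.users", db);
        std::cout << db << std::endl; // prints "test"
        return 0;
    }
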
+ uassert( 10193 , (string)"no shard info for: " + nsStr , _chunkManager ); } else { _chunkManager.reset(); @@ -100,7 +113,7 @@ namespace mongo { } - log(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.header()->id) << " attempt: " << attempt << endl; + LOG(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.header()->id) << " attempt: " << attempt << endl; Strategy * s = SINGLE; _counter = &opsNonSharded; @@ -134,6 +147,7 @@ namespace mongo { s->getMore( *this ); } else { + checkAuth(); s->writeOp( op, *this ); } diff --git a/s/request.h b/s/request.h index 7c51e5c..86a484e 100644 --- a/s/request.h +++ b/s/request.h @@ -19,7 +19,7 @@ #pragma once #include "../pch.h" -#include "../util/message.h" +#include "../util/net/message.h" #include "../db/dbmessage.h" #include "config.h" #include "util.h" @@ -70,6 +70,8 @@ namespace mongo { return _clientInfo; } + void checkAuth() const; + // ---- remote location info ----- diff --git a/s/s_only.cpp b/s/s_only.cpp index 83bceac..6449b34 100644 --- a/s/s_only.cpp +++ b/s/s_only.cpp @@ -31,7 +31,7 @@ namespace mongo { boost::thread_specific_ptr<Client> currentClient; - Client::Client(const char *desc , MessagingPort *p) : + Client::Client(const char *desc , AbstractMessagingPort *p) : _context(0), _shutdown(false), _desc(desc), @@ -42,7 +42,7 @@ namespace mongo { Client::~Client() {} bool Client::shutdown() { return true; } - Client& Client::initThread(const char *desc, MessagingPort *mp) { + Client& Client::initThread(const char *desc, AbstractMessagingPort *mp) { setThreadName(desc); assert( currentClient.get() == 0 ); Client *c = new Client(desc, mp); @@ -85,8 +85,13 @@ namespace mongo { log( 2 ) << "command: " << cmdObj << endl; } + if (!client.getAuthenticationInfo()->isAuthorized(dbname)) { + result.append("errmsg" , "unauthorized"); + return false; + } + string errmsg; - int ok = c->run( dbname , cmdObj , errmsg , result , fromRepl ); + int ok = c->run( dbname , cmdObj , queryOptions, errmsg , result , fromRepl ); if ( ! ok ) result.append( "errmsg" , errmsg ); return ok; diff --git a/s/security.cpp b/s/security.cpp new file mode 100644 index 0000000..0b8954e --- /dev/null +++ b/s/security.cpp @@ -0,0 +1,112 @@ +// security.cpp +/* + * Copyright (C) 2010 10gen Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +// security.cpp + +#include "pch.h" +#include "../db/security_common.h" +#include "../db/security.h" +#include "config.h" +#include "client.h" +#include "grid.h" + +// this is the _mongos only_ implementation of security.h + +namespace mongo { + + bool AuthenticationInfo::_warned; + + bool CmdAuthenticate::getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd) { + if (user == internalSecurity.user) { + uassert(15890, "key file must be used to log in with internal user", cmdLine.keyFile); + pwd = internalSecurity.pwd; + } + else { + string systemUsers = dbname + ".system.users"; + DBConfigPtr config = grid.getDBConfig( systemUsers ); + Shard s = config->getShard( systemUsers ); + + static BSONObj userPattern = BSON("user" << 1); + + ShardConnection conn( s, systemUsers ); + OCCASIONALLY conn->ensureIndex(systemUsers, userPattern, false, "user_1"); + { + BSONObjBuilder b; + b << "user" << user; + BSONObj query = b.done(); + userObj = conn->findOne(systemUsers, query); + if( userObj.isEmpty() ) { + log() << "auth: couldn't find user " << user << ", " << systemUsers << endl; + conn.done(); // return to pool + return false; + } + } + + pwd = userObj.getStringField("pwd"); + + conn.done(); // return to pool + } + return true; + } + + void CmdAuthenticate::authenticate(const string& dbname, const string& user, const bool readOnly) { + AuthenticationInfo *ai = ClientInfo::get()->getAuthenticationInfo(); + + if ( readOnly ) { + ai->authorizeReadOnly( dbname , user ); + } + else { + ai->authorize( dbname , user ); + } + } + + bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) const { + if ( !isLocalHost ) { + return false; + } + + string adminNs = "admin.system.users"; + + DBConfigPtr config = grid.getDBConfig( adminNs ); + Shard s = config->getShard( adminNs ); + + ShardConnection conn( s, adminNs ); + BSONObj result = conn->findOne("admin.system.users", Query()); + if( result.isEmpty() ) { + if( ! _warned ) { + // you could get a few of these in a race, but that's ok + _warned = true; + log() << "note: no users configured in admin.system.users, allowing localhost access" << endl; + } + + // Must return conn to pool + // TODO: Check for errors during findOne(), or just let the conn die? 
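
The getUserObj() flow above special-cases the internal user (which must authenticate via the key file) and otherwise looks the user up in <dbname>.system.users on the appropriate shard. A rough sketch of that control flow with a std::map standing in for the findOne() against the users collection; the "__system" literal and the function name here are illustrative assumptions, not the real interfaces:

    #include <iostream>
    #include <map>
    #include <string>

    // Sketch: resolve a user's stored password, mirroring getUserObj().
    bool getUserPwd(const std::string& dbname, const std::string& user,
                    const std::map<std::string, std::string>& systemUsers,
                    const std::string& keyFilePwd, std::string& pwd) {
        if (user == "__system") {   // assumed internalSecurity.user
            pwd = keyFilePwd;       // the real code requires --keyFile here
            return true;
        }
        std::map<std::string, std::string>::const_iterator it = systemUsers.find(user);
        if (it == systemUsers.end()) {
            std::cout << "auth: couldn't find user " << user << ", "
                      << dbname << ".system.users" << std::endl;
            return false;
        }
        pwd = it->second; // the real code reads the "pwd" field of the user doc
        return true;
    }

    int main() {
        std::map<std::string, std::string> users;
        users["alice"] = "<pwd hash>";
        std::string pwd;
        std::cout << getUserPwd("test", "alice", users, "keypwd", pwd)
                  << " " << pwd << std::endl;
        return 0;
    }
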
+ conn.done(); + return true; + } + + // Must return conn to pool + conn.done(); + return false; + } + + bool CmdLogout::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { + AuthenticationInfo *ai = ClientInfo::get()->getAuthenticationInfo(); + ai->logout(dbname); + return true; + } +} diff --git a/s/server.cpp b/s/server.cpp index 51f30f1..a6ffab9 100644 --- a/s/server.cpp +++ b/s/server.cpp @@ -17,15 +17,18 @@ */ #include "pch.h" -#include "../util/message.h" +#include "../util/net/message.h" #include "../util/unittest.h" #include "../client/connpool.h" -#include "../util/message_server.h" +#include "../util/net/message_server.h" #include "../util/stringutils.h" #include "../util/version.h" +#include "../util/ramlog.h" #include "../util/signal_handlers.h" #include "../util/admin_access.h" +#include "../util/concurrency/task.h" #include "../db/dbwebserver.h" +#include "../scripting/engine.h" #include "server.h" #include "request.h" @@ -43,6 +46,7 @@ namespace mongo { Database *database = 0; string mongosCommand; bool dbexitCalled = false; + static bool scriptingEnabled = true; bool inShutdown() { return dbexitCalled; @@ -65,20 +69,18 @@ namespace mongo { out() << endl; } - class ShardingConnectionHook : public DBConnectionHook { - public: - - virtual void onHandedOut( DBClientBase * conn ) { - ClientInfo::get()->addShard( conn->getServerAddress() ); - } - } shardingConnectionHook; + void ShardingConnectionHook::onHandedOut( DBClientBase * conn ) { + ClientInfo::get()->addShard( conn->getServerAddress() ); + } class ShardedMessageHandler : public MessageHandler { public: virtual ~ShardedMessageHandler() {} virtual void connected( AbstractMessagingPort* p ) { - assert( ClientInfo::get() ); + ClientInfo *c = ClientInfo::get(); + massert(15849, "client info not defined", c); + c->getAuthenticationInfo()->isLocalHost = p->remote().isLocalHost(); } virtual void process( Message& m , AbstractMessagingPort* p , LastError * le) { @@ -93,7 +95,7 @@ namespace mongo { r.process(); } catch ( AssertionException & e ) { - log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException in process: " << e.what() << endl; + log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl; le->raiseError( e.getCode() , e.what() ); @@ -147,6 +149,7 @@ namespace mongo { setupSIGTRAPforGDB(); setupCoreSignals(); setupSignals( false ); + Logstream::get().addGlobalTee( new RamLog("global") ); } void start( const MessageServer::Options& opts ) { @@ -154,10 +157,8 @@ namespace mongo { installChunkShardVersioning(); balancer.go(); cursorCache.startTimeoutThread(); + PeriodicTask::theRunner->go(); - log() << "waiting for connections on port " << cmdLine.port << endl; - //DbGridListener l(port); - //l.listen(); ShardedMessageHandler handler; MessageServer * server = createServer( opts , &handler ); server->setAsTimeTracker(); @@ -201,6 +202,7 @@ int _main(int argc, char* argv[]) { ( "chunkSize" , po::value<int>(), "maximum amount of data per chunk" ) ( "ipv6", "enable IPv6 support (disabled by default)" ) ( "jsonp","allow JSONP access via http (has security implications)" ) + ("noscripting", "disable scripting engine") ; options.add(sharding_options); @@ -242,6 +244,10 @@ int _main(int argc, char* argv[]) { return 0; } + if (params.count("noscripting")) { + scriptingEnabled = false; + } + if ( ! 
params.count( "configdb" ) ) { out() << "error: no args for --configdb" << endl; return 4; @@ -254,7 +260,7 @@ int _main(int argc, char* argv[]) { return 5; } - // we either have a seeting were all process are in localhost or none is + // we either have a setting where all processes are in localhost or none are for ( vector<string>::const_iterator it = configdbs.begin() ; it != configdbs.end() ; ++it ) { try { @@ -278,8 +284,12 @@ int _main(int argc, char* argv[]) { // set some global state - pool.addHook( &shardingConnectionHook ); + pool.addHook( new ShardingConnectionHook( false ) ); pool.setName( "mongos connectionpool" ); + + shardConnectionPool.addHook( new ShardingConnectionHook( true ) ); + shardConnectionPool.setName( "mongos shardconnection connectionpool" ); + DBClientConnection::setLazyKillCursor( false ); @@ -309,6 +319,16 @@ int _main(int argc, char* argv[]) { return 8; } + { + class CheckConfigServers : public task::Task { + virtual string name() const { return "CheckConfigServers"; } + virtual void doWork() { configServer.ok(true); } + }; + static CheckConfigServers checkConfigServers; + + task::repeat(&checkConfigServers, 60*1000); + } + int configError = configServer.checkConfigVersion( params.count( "upgrade" ) ); if ( configError ) { if ( configError > 0 ) { @@ -325,6 +345,12 @@ int _main(int argc, char* argv[]) { boost::thread web( boost::bind(&webServerThread, new NoAdminAccess() /* takes ownership */) ); + if ( scriptingEnabled ) { + ScriptEngine::setup(); +// globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback ); +// globalScriptEngine->setGetInterruptSpecCallback( jsGetInterruptSpecCallback ); + } + MessageServer::Options opts; opts.port = cmdLine.port; opts.ipList = cmdLine.bind_ip; @@ -335,6 +361,7 @@ int _main(int argc, char* argv[]) { } int main(int argc, char* argv[]) { try { + doPreServerStatupInits(); return _main(argc, argv); } catch(DBException& e) { @@ -352,6 +379,12 @@ int main(int argc, char* argv[]) { } #undef exit + +void mongo::exitCleanly( ExitCode code ) { + // TODO: do we need to add anything? 
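
The server.cpp changes above schedule CheckConfigServers with task::repeat(&checkConfigServers, 60*1000), i.e. the config servers are re-checked once a minute from a background runner. A minimal sketch of such a repeating task using std threads, assuming only that the period is in milliseconds; the real PeriodicTask/task runner is more involved:

    #include <atomic>
    #include <chrono>
    #include <functional>
    #include <iostream>
    #include <thread>

    // Illustrative stand-in for task::repeat(): run fn every periodMillis
    // on a background thread until stop is set.
    std::thread repeat(std::function<void()> fn, int periodMillis,
                       std::atomic<bool>& stop) {
        return std::thread([fn, periodMillis, &stop] {
            while (!stop) {
                fn();
                std::this_thread::sleep_for(std::chrono::milliseconds(periodMillis));
            }
        });
    }

    int main() {
        std::atomic<bool> stop(false);
        std::thread t = repeat([] { std::cout << "checking config servers" << std::endl; },
                               100, stop);
        std::this_thread::sleep_for(std::chrono::milliseconds(350));
        stop = true;
        t.join();
        return 0;
    }
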
+ mongo::dbexit( code ); +} + void mongo::dbexit( ExitCode rc, const char *why, bool tryToGetLock ) { dbexitCalled = true; log() << "dbexit: " << why @@ -17,7 +17,7 @@ */ #include <string> -#include "../util/message.h" +#include "../util/net/message.h" #include "../db/jsobj.h" namespace mongo { diff --git a/s/shard.cpp b/s/shard.cpp index c1e3b56..75326e0 100644 --- a/s/shard.cpp +++ b/s/shard.cpp @@ -20,6 +20,7 @@ #include "shard.h" #include "config.h" #include "request.h" +#include "client.h" #include "../db/commands.h" #include <set> @@ -111,6 +112,14 @@ namespace mongo { return i->second; } + // Useful for ensuring our shard data will not be modified while we use it + Shard findCopy( const string& ident ){ + ShardPtr found = find( ident ); + scoped_lock lk( _mutex ); + massert( 13128 , (string)"can't find shard for: " + ident , found.get() ); + return *found.get(); + } + void set( const string& name , const Shard& s , bool setName = true , bool setAddr = true ) { scoped_lock lk( _mutex ); ShardPtr ss( new Shard( s ) ); @@ -226,7 +235,7 @@ namespace mongo { virtual bool slaveOk() const { return true; } virtual bool adminOnly() const { return true; } - virtual bool run(const string&, mongo::BSONObj&, std::string& errmsg , mongo::BSONObjBuilder& result, bool) { + virtual bool run(const string&, mongo::BSONObj&, int, std::string& errmsg , mongo::BSONObjBuilder& result, bool) { return staticShardInfo.getShardMap( result , errmsg ); } } cmdGetShardMap; @@ -243,10 +252,7 @@ namespace mongo { void Shard::_rsInit() { if ( _cs.type() == ConnectionString::SET ) { string x = _cs.getSetName(); - if ( x.size() == 0 ) { - warning() << "no set name for shard: " << _name << " " << _cs.toString() << endl; - } - assert( x.size() ); + massert( 14807 , str::stream() << "no set name for shard: " << _name << " " << _cs.toString() , x.size() ); _rs = ReplicaSetMonitor::get( x , _cs.getServers() ); } } @@ -260,14 +266,9 @@ namespace mongo { } void Shard::reset( const string& ident ) { - ShardPtr s = staticShardInfo.find( ident ); - massert( 13128 , (string)"can't find shard for: " + ident , s->ok() ); - _name = s->_name; - _addr = s->_addr; - _cs = s->_cs; + *this = staticShardInfo.findCopy( ident ); + _rs.reset(); _rsInit(); - _maxSize = s->_maxSize; - _isDraining = s->_isDraining; } bool Shard::containsNode( const string& node ) const { @@ -289,10 +290,10 @@ namespace mongo { } void Shard::printShardInfo( ostream& out ) { - vector<ShardPtr> all; + vector<Shard> all; staticShardInfo.getAllShards( all ); for ( unsigned i=0; i<all.size(); i++ ) - out << all[i]->toString() << "\n"; + out << all[i].toString() << "\n"; out.flush(); } @@ -324,7 +325,7 @@ namespace mongo { } Shard Shard::pick( const Shard& current ) { - vector<ShardPtr> all; + vector<Shard> all; staticShardInfo.getAllShards( all ); if ( all.size() == 0 ) { staticShardInfo.reload(); @@ -334,18 +335,18 @@ namespace mongo { } // if current shard was provided, pick a different shard only if it is a better choice - ShardStatus best = all[0]->getStatus(); + ShardStatus best = all[0].getStatus(); if ( current != EMPTY ) { best = current.getStatus(); } for ( size_t i=0; i<all.size(); i++ ) { - ShardStatus t = all[i]->getStatus(); + ShardStatus t = all[i].getStatus(); if ( t < best ) best = t; } - log(1) << "best shard for new allocation is " << best << endl; + LOG(1) << "best shard for new allocation is " << best << endl; return best.shard(); } @@ -356,4 +357,20 @@ namespace mongo { _writeLock = 0; // TODO } + void ShardingConnectionHook::onCreate( 
DBClientBase * conn ) {
+        if( !noauth ) {
+            string err;
+            LOG(2) << "calling onCreate auth for " << conn->toString() << endl;
+            uassert( 15847, "can't authenticate to shard server",
+                     conn->auth("local", internalSecurity.user, internalSecurity.pwd, err, false));
+        }
+
+        if ( _shardedConnections ) {
+            conn->simpleCommand( "admin" , 0 , "setShardVersion" );
+        }
+    }
+
+    void ShardingConnectionHook::onDestory( DBClientBase * conn ) {
+        resetShardVersionCB( conn );
+    }
}
@@ -255,6 +255,8 @@ namespace mongo {
            _setVersion = false;
            _finishedInit = true;
        }
+
+        bool ok() const { return _conn > 0; }

        /** this just passes through except it checks for stale configs
@@ -275,4 +277,21 @@ namespace mongo {
        DBClientBase* _conn;
        bool _setVersion;
    };
+
+
+    extern DBConnectionPool shardConnectionPool;
+
+    class ShardingConnectionHook : public DBConnectionHook {
+    public:
+
+        ShardingConnectionHook( bool shardedConnections )
+            : _shardedConnections( shardedConnections ) {
+        }
+
+        virtual void onCreate( DBClientBase * conn );
+        virtual void onHandedOut( DBClientBase * conn );
+        virtual void onDestory( DBClientBase * conn );
+
+        bool _shardedConnections;
+    };
}
diff --git a/s/shard_version.cpp b/s/shard_version.cpp
index a189a08..8782c8e 100644
--- a/s/shard_version.cpp
+++ b/s/shard_version.cpp
@@ -82,29 +82,54 @@ namespace mongo {
    /**
     * @return true if had to do something
     */
-    bool checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative , int tryNumber ) {
+    bool checkShardVersion( DBClientBase& conn_in , const string& ns , bool authoritative , int tryNumber ) {
        // TODO: cache, optimize, etc...
-        WriteBackListener::init( conn );
+        WriteBackListener::init( conn_in );

        DBConfigPtr conf = grid.getDBConfig( ns );
        if ( ! conf )
            return false;

+        DBClientBase* conn = 0;
+
+        switch ( conn_in.type() ) {
+        case ConnectionString::INVALID:
+            assert(0);
+            break;
+        case ConnectionString::MASTER:
+            // great
+            conn = &conn_in;
+            break;
+        case ConnectionString::PAIR:
+            assert( ! "pair not supported for sharding" );
+            break;
+        case ConnectionString::SYNC:
+            // TODO: we should check later that we aren't actually sharded on this
+            conn = &conn_in;
+            break;
+        case ConnectionString::SET:
+            DBClientReplicaSet* set = (DBClientReplicaSet*)&conn_in;
+            conn = &(set->masterConn());
+            break;
+        }
+
+        assert(conn);
+
        unsigned long long officialSequenceNumber = 0;

        ChunkManagerPtr manager;
        const bool isSharded = conf->isSharded( ns );
        if ( isSharded ) {
-            manager = conf->getChunkManager( ns , authoritative );
+            manager = conf->getChunkManagerIfExists( ns , authoritative );
            // It's possible the chunk manager was reset since we checked whether sharded was true,
            // so must check this here.
            if( manager ) officialSequenceNumber = manager->getSequenceNumber();
        }

        // has the ChunkManager been reloaded since the last time we updated the connection-level version?
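
The comment above is the heart of checkShardVersion(): each (connection, namespace) pair caches the ChunkManager sequence number it last sent via setShardVersion, and the round trip is skipped while the cached number still matches. A sketch of that bookkeeping, with a plain map standing in for connectionShardStatus:

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    // (connection pointer, namespace) -> last sequence number sent.
    typedef std::map<std::pair<const void*, std::string>, unsigned long long> SeqMap;

    // Returns true when a setShardVersion round trip is needed. Note: the real
    // code updates the cached sequence only after setShardVersion succeeds;
    // this sketch updates it eagerly for brevity.
    bool needsSetShardVersion(SeqMap& cache, const void* conn,
                              const std::string& ns,
                              unsigned long long officialSequenceNumber) {
        unsigned long long& last = cache[std::make_pair(conn, ns)];
        if (last == officialSequenceNumber)
            return false; // connection already at the current version
        last = officialSequenceNumber;
        return true;
    }

    int main() {
        SeqMap cache;
        int conn = 0;
        std::cout << needsSetShardVersion(cache, &conn, "test.foo", 3) << std::endl; // 1
        std::cout << needsSetShardVersion(cache, &conn, "test.foo", 3) << std::endl; // 0
        return 0;
    }
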
- // (ie, last time we issued the setShardVersions below) - unsigned long long sequenceNumber = connectionShardStatus.getSequence(&conn,ns); + // (ie., last time we issued the setShardVersions below) + unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns); if ( sequenceNumber == officialSequenceNumber ) { return false; } @@ -112,40 +137,53 @@ namespace mongo { ShardChunkVersion version = 0; if ( isSharded && manager ) { - version = manager->getVersion( Shard::make( conn.getServerAddress() ) ); + version = manager->getVersion( Shard::make( conn->getServerAddress() ) ); } - log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns + LOG(2) << " have to set shard version for conn: " << conn << " ns:" << ns << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber << " version: " << version << " manager: " << manager.get() << endl; BSONObj result; - if ( setShardVersion( conn , ns , version , authoritative , result ) ) { + if ( setShardVersion( *conn , ns , version , authoritative , result ) ) { // success! - log(1) << " setShardVersion success!" << endl; - connectionShardStatus.setSequence( &conn , ns , officialSequenceNumber ); + LOG(1) << " setShardVersion success: " << result << endl; + connectionShardStatus.setSequence( conn , ns , officialSequenceNumber ); return true; } - log(1) << " setShardVersion failed!\n" << result << endl; + LOG(1) << " setShardVersion failed!\n" << result << endl; - if ( result.getBoolField( "need_authoritative" ) ) + if ( result["need_authoritative"].trueValue() ) massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative ); if ( ! authoritative ) { - checkShardVersion( conn , ns , 1 , tryNumber + 1 ); + checkShardVersion( *conn , ns , 1 , tryNumber + 1 ); return true; } + + if ( result["reloadConfig"].trueValue() ) { + if( result["version"].timestampTime() == 0 ){ + // reload db + conf->reload(); + } + else { + // reload config + conf->getChunkManager( ns , true ); + } + } - if ( tryNumber < 4 ) { - log(1) << "going to retry checkShardVersion" << endl; - sleepmillis( 10 ); - checkShardVersion( conn , ns , 1 , tryNumber + 1 ); + const int maxNumTries = 7; + if ( tryNumber < maxNumTries ) { + LOG( tryNumber < ( maxNumTries / 2 ) ? 
1 : 0 ) + << "going to retry checkShardVersion host: " << conn->getServerAddress() << " " << result << endl; + sleepmillis( 10 * tryNumber ); + checkShardVersion( *conn , ns , true , tryNumber + 1 ); return true; } - string errmsg = str::stream() << "setShardVersion failed host[" << conn.getServerAddress() << "] " << result; + string errmsg = str::stream() << "setShardVersion failed host: " << conn->getServerAddress() << " " << result; log() << " " << errmsg << endl; massert( 10429 , errmsg , 0 ); return true; diff --git a/s/shard_version.h b/s/shard_version.h index 023b7fc..98cacf6 100644 --- a/s/shard_version.h +++ b/s/shard_version.h @@ -28,4 +28,5 @@ namespace mongo { */ void installChunkShardVersioning(); + } // namespace mongo diff --git a/s/shardconnection.cpp b/s/shardconnection.cpp index ec14139..04b49f2 100644 --- a/s/shardconnection.cpp +++ b/s/shardconnection.cpp @@ -41,12 +41,14 @@ namespace mongo { boost::function4<bool, DBClientBase&, const string&, bool, int> checkShardVersionCB = defaultCheckShardVersion; boost::function1<void, DBClientBase*> resetShardVersionCB = defaultResetShardVersion; + DBConnectionPool shardConnectionPool; + // Only print the non-top-level-shard-conn warning once if not verbose volatile bool printedShardConnWarning = false; /** * holds all the actual db connections for a client to various servers - * 1 pre thread, so don't have to worry about thread safety + * 1 per thread, so doesn't have to be thread safe */ class ClientConnections : boost::noncopyable { public: @@ -68,8 +70,10 @@ namespace mongo { if ( ss->avail ) { /* if we're shutting down, don't want to initiate release mechanism as it is slow, and isn't needed since all connections will be closed anyway */ - if ( inShutdown() ) + if ( inShutdown() ) { + resetShardVersionCB( ss->avail ); delete ss->avail; + } else release( addr , ss->avail ); ss->avail = 0; @@ -115,12 +119,12 @@ namespace mongo { if ( s->avail ) { DBClientBase* c = s->avail; s->avail = 0; - pool.onHandedOut( c ); + shardConnectionPool.onHandedOut( c ); return c; } s->created++; - return pool.get( addr ); + return shardConnectionPool.get( addr ); } void done( const string& addr , DBClientBase* conn ) { @@ -137,15 +141,10 @@ namespace mongo { for ( HostMap::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) { string addr = i->first; Status* ss = i->second; - - if ( ss->avail ) { + if ( ss->avail ) ss->avail->getLastError(); - release( addr , ss->avail ); - ss->avail = 0; - } - delete ss; + } - _hosts.clear(); } void checkVersions( const string& ns ) { @@ -157,14 +156,14 @@ namespace mongo { for ( unsigned i=0; i<all.size(); i++ ) { string sconnString = all[i].getConnString(); - Status* &s = _hosts[ sconnString ]; + Status* &s = _hosts[sconnString]; if ( ! s ){ s = new Status(); } if( ! 
s->avail ) - s->avail = pool.get( sconnString ); + s->avail = shardConnectionPool.get( sconnString ); checkShardVersionCB( *s->avail, ns, false, 1 ); @@ -172,27 +171,7 @@ namespace mongo { } void release( const string& addr , DBClientBase * conn ) { - resetShardVersionCB( conn ); - BSONObj res; - - try { - if ( conn->simpleCommand( "admin" , &res , "unsetSharding" ) ) { - pool.release( addr , conn ); - } - else { - error() << "unset sharding failed : " << res << endl; - delete conn; - } - } - catch ( SocketException& e ) { - // server down or something - LOG(1) << "socket exception trying to unset sharding: " << e.toString() << endl; - delete conn; - } - catch ( std::exception& e ) { - error() << "couldn't unset sharding : " << e.what() << endl; - delete conn; - } + shardConnectionPool.release( addr , conn ); } void _check( const string& ns ) { diff --git a/s/shardkey.cpp b/s/shardkey.cpp index 84cdb4b..d6c8eda 100644 --- a/s/shardkey.cpp +++ b/s/shardkey.cpp @@ -55,7 +55,8 @@ namespace mongo { */ for(set<string>::const_iterator it = patternfields.begin(); it != patternfields.end(); ++it) { - if(obj.getFieldDotted(it->c_str()).eoo()) + BSONElement e = obj.getFieldDotted(it->c_str()); + if(e.eoo() || e.type() == Array) return false; } return true; @@ -83,7 +84,7 @@ namespace mongo { vector<const char*> keysToMove; keysToMove.push_back("_id"); BSONForEach(e, pattern) { - if (strchr(e.fieldName(), '.') == NULL) + if (strchr(e.fieldName(), '.') == NULL && strcmp(e.fieldName(), "_id") != 0) keysToMove.push_back(e.fieldName()); } @@ -93,6 +94,7 @@ namespace mongo { } else { BufBuilder buf (obj.objsize()); + buf.appendNum((unsigned)0); // refcount buf.appendNum(obj.objsize()); vector<pair<const char*, size_t> > copies; @@ -135,7 +137,7 @@ namespace mongo { buf.appendChar('\0'); - BSONObj out (buf.buf(), true); + BSONObj out ((BSONObj::Holder*)buf.buf()); buf.decouple(); return out; } @@ -184,8 +186,8 @@ namespace mongo { ShardKeyPattern k( fromjson("{a:1,'sub.b':-1,'sub.c':1}") ); BSONObj x = fromjson("{a:1,'sub.b':2,'sub.c':3}"); - assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).woEqual(x) ); - assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).woEqual(x) ); + assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).binaryEqual(x) ); + assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).binaryEqual(x) ); } void moveToFrontTest() { ShardKeyPattern sk (BSON("a" << 1 << "b" << 1)); @@ -193,13 +195,13 @@ namespace mongo { BSONObj ret; ret = sk.moveToFront(BSON("z" << 1 << "_id" << 1 << "y" << 1 << "a" << 1 << "x" << 1 << "b" << 1 << "w" << 1)); - assert(ret.woEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1))); + assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1))); ret = sk.moveToFront(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)); - assert(ret.woEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1))); + assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1))); ret = sk.moveToFront(BSON("z" << 1 << "y" << 1 << "a" << 1 << "b" << 1 << "Z" << 1 << "Y" << 1)); - assert(ret.woEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1))); + assert(ret.binaryEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1))); } @@ -262,7 +264,7 @@ namespace mongo { moveToFrontBenchmark(100); } - log(1) << 
"shardKeyTest passed" << endl; + LOG(1) << "shardKeyTest passed" << endl; } } shardKeyTest; diff --git a/s/shardkey.h b/s/shardkey.h index 96301ff..976cff0 100644 --- a/s/shardkey.h +++ b/s/shardkey.h @@ -102,7 +102,21 @@ namespace mongo { }; inline BSONObj ShardKeyPattern::extractKey(const BSONObj& from) const { - BSONObj k = from.extractFields(pattern); + BSONObj k = from; + bool needExtraction = false; + + BSONObjIterator a(from); + BSONObjIterator b(pattern); + while (a.more() && b.more()){ + if (strcmp(a.next().fieldName(), b.next().fieldName()) != 0){ + needExtraction = true; + break; + } + } + + if (needExtraction || a.more() != b.more()) + k = from.extractFields(pattern); + uassert(13334, "Shard Key must be less than 512 bytes", k.objsize() < 512); return k; } diff --git a/s/strategy.cpp b/s/strategy.cpp index 7c1fb0b..4230b7f 100644 --- a/s/strategy.cpp +++ b/s/strategy.cpp @@ -38,7 +38,7 @@ namespace mongo { conn.donotCheckVersion(); else if ( conn.setVersion() ) { conn.done(); - throw StaleConfigException( r.getns() , "doWRite" , true ); + throw StaleConfigException( r.getns() , "doWrite" , true ); } conn->say( r.m() ); conn.done(); @@ -46,6 +46,8 @@ namespace mongo { void Strategy::doQuery( Request& r , const Shard& shard ) { + r.checkAuth(); + ShardConnection dbcon( shard , r.getns() ); DBClientBase &c = dbcon.conn(); @@ -67,13 +69,31 @@ namespace mongo { dbcon.done(); } - void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj ) { + void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj , int flags, bool safe ) { ShardConnection dbcon( shard , ns ); if ( dbcon.setVersion() ) { dbcon.done(); throw StaleConfigException( ns , "for insert" ); } - dbcon->insert( ns , obj ); + dbcon->insert( ns , obj , flags); + if (safe) + dbcon->getLastError(); dbcon.done(); } + + void Strategy::update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags, bool safe ) { + bool upsert = flags & UpdateOption_Upsert; + bool multi = flags & UpdateOption_Multi; + + ShardConnection dbcon( shard , ns ); + if ( dbcon.setVersion() ) { + dbcon.done(); + throw StaleConfigException( ns , "for insert" ); + } + dbcon->update( ns , query , toupdate, upsert, multi); + if (safe) + dbcon->getLastError(); + dbcon.done(); + } + } diff --git a/s/strategy.h b/s/strategy.h index 10a5a3f..326a515 100644 --- a/s/strategy.h +++ b/s/strategy.h @@ -32,11 +32,15 @@ namespace mongo { virtual void getMore( Request& r ) = 0; virtual void writeOp( int op , Request& r ) = 0; + virtual void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags, bool safe=false, const char* nsChunkLookup=0 ) = 0; + virtual void updateSharded( DBConfigPtr conf, const char* ns, BSONObj& query, BSONObj& toupdate, int flags, bool safe=false ) = 0; + protected: void doWrite( int op , Request& r , const Shard& shard , bool checkVersion = true ); void doQuery( Request& r , const Shard& shard ); - void insert( const Shard& shard , const char * ns , const BSONObj& obj ); + void insert( const Shard& shard , const char * ns , const BSONObj& obj , int flags=0 , bool safe=false ); + void update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags=0, bool safe=false ); }; diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp index 337fa58..c6b30e7 100644 --- a/s/strategy_shard.cpp +++ b/s/strategy_shard.cpp @@ -35,7 +35,9 @@ namespace mongo { virtual void queryOp( Request& r ) { QueryMessage q( r.d() ); 
-           log(3) << "shard query: " << q.ns << " " << q.query << endl;
+           r.checkAuth();
+
+           LOG(3) << "shard query: " << q.ns << " " << q.query << endl;

            if ( q.ntoreturn == 1 && strstr(q.ns, ".$cmd") )
                throw UserException( 8010 , "something is wrong, shouldn't see a command here" );
@@ -66,20 +68,14 @@ namespace mongo {
            ClusteredCursor * cursor = 0;

            BSONObj sort = query.getSort();
-
-           if ( sort.isEmpty() ) {
-               cursor = new SerialServerClusteredCursor( servers , q );
-           }
-           else {
-               cursor = new ParallelSortClusteredCursor( servers , q , sort );
-           }
+           cursor = new ParallelSortClusteredCursor( servers , q , sort );

            assert( cursor );

            try {
                cursor->init();
-               log(5) << " cursor type: " << cursor->type() << endl;
+               LOG(5) << " cursor type: " << cursor->type() << endl;
                shardedCursorTypes.hit( cursor->type() );

                if ( query.isExplain() ) {
@@ -98,7 +94,7 @@ namespace mongo {
            if ( ! cc->sendNextBatch( r ) ) {
                return;
            }
-           log(6) << "storing cursor : " << cc->getId() << endl;
+           LOG(6) << "storing cursor : " << cc->getId() << endl;
            cursorCache.store( cc );
        }

@@ -106,11 +102,11 @@ namespace mongo {
            int ntoreturn = r.d().pullInt();
            long long id = r.d().pullInt64();

-           log(6) << "want cursor : " << id << endl;
+           LOG(6) << "want cursor : " << id << endl;

            ShardedClientCursorPtr cursor = cursorCache.get( id );
            if ( ! cursor ) {
-               log(6) << "\t invalid cursor :(" << endl;
+               LOG(6) << "\t invalid cursor :(" << endl;
                replyToQuery( ResultFlag_CursorNotFound , r.p() , r.m() , 0 , 0 , 0 );
                return;
            }
@@ -126,56 +122,126 @@ namespace mongo {
        }

        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
+           const int flags = d.reservedField();
+           bool keepGoing = flags & InsertOption_ContinueOnError; // may be cleared before the assert below when we have to abort
            while ( d.moreJSObjs() ) {
-               BSONObj o = d.nextJsObj();
-               if ( ! manager->hasShardKey( o ) ) {
+               try {
+                   BSONObj o = d.nextJsObj();
+                   if ( ! manager->hasShardKey( o ) ) {
+
+                       bool bad = true;
-                   bool bad = true;
+                       if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
+                           BSONObjBuilder b;
+                           b.appendOID( "_id" , 0 , true );
+                           b.appendElements( o );
+                           o = b.obj();
+                           bad = ! manager->hasShardKey( o );
+                       }
+
+                       if ( bad ) {
+                           log() << "tried to insert object with no valid shard key: " << r.getns() << " " << o << endl;
+                           uasserted( 8011 , "tried to insert object with no valid shard key" );
+                       }
-                   if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
-                       BSONObjBuilder b;
-                       b.appendOID( "_id" , 0 , true );
-                       b.appendElements( o );
-                       o = b.obj();
-                       bad = ! manager->hasShardKey( o );
                    }
-                   if ( bad ) {
-                       log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
-                       throw UserException( 8011 , "tried to insert object without shard key" );
+                   // Many operations benefit from having the shard key early in the object
+                   o = manager->getShardKey().moveToFront(o);
+
+                   const int maxTries = 30;
+
+                   bool gotThrough = false;
+                   for ( int i=0; i<maxTries; i++ ) {
+                       try {
+                           ChunkPtr c = manager->findChunk( o );
+                           LOG(4) << " server:" << c->getShard().toString() << " " << o << endl;
+                           insert( c->getShard() , r.getns() , o , flags);
+
+                           r.gotInsert();
+                           if ( r.getClientInfo()->autoSplitOk() )
+                               c->splitIfShould( o.objsize() );
+                           gotThrough = true;
+                           break;
+                       }
+                       catch ( StaleConfigException& e ) {
+                           int logLevel = i < ( maxTries / 2 );
+                           LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
+                           r.reset();
+
+                           unsigned long long old = manager->getSequenceNumber();
+                           manager = r.getChunkManager();
+                           if( ! manager ) {
+                               keepGoing = false;
+                               uasserted(14804, "collection no longer sharded");
+                           }
+
+                           LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
+                       }
+                       sleepmillis( i * 20 );
                    }
+
+                   assert( inShutdown() || gotThrough ); // not caught below
+               } catch (const UserException&){
+                   if (!keepGoing || !d.moreJSObjs()){
+                       throw;
+                   }
+                   // otherwise ignore and keep going
                }
+           }
+       }
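The _insert() rewrite above wraps each document in a bounded retry: up to maxTries attempts, quiet logging (LOG(1)) for the first half of the attempts and loud logging (LOG(0)) afterwards, with a linearly growing backoff between attempts. The same shape in a self-contained sketch; StaleConfig and the names are stand-ins, not tree code:

#include <chrono>
#include <iostream>
#include <thread>

struct StaleConfig {};   // stands in for StaleConfigException

template <typename Op>
bool retryOnStaleConfig(Op op, int maxTries = 30) {
    for (int i = 0; i < maxTries; ++i) {
        try {
            op();
            return true;                         // gotThrough
        }
        catch (const StaleConfig&) {
            bool quiet = i < (maxTries / 2);     // LOG(1) vs LOG(0) in the tree
            if (!quiet)
                std::cerr << "retrying after stale config, attempt " << i << '\n';
            // the real code refreshes the chunk manager here
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(i * 20));
    }
    return false;
}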
-               // Many operations benefit from having the shard key early in the object
-               o = manager->getShardKey().moveToFront(o);
+       void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags, bool safe, const char* nsChunkLookup ) {
+           if (!nsChunkLookup)
+               nsChunkLookup = ns;
+           ChunkManagerPtr manager = conf->getChunkManager(nsChunkLookup);
+           if ( ! manager->hasShardKey( o ) ) {
-               const int maxTries = 30;
+               bool bad = true;
-               bool gotThrough = false;
-               for ( int i=0; i<maxTries; i++ ) {
-                   try {
-                       ChunkPtr c = manager->findChunk( o );
-                       log(4) << " server:" << c->getShard().toString() << " " << o << endl;
-                       insert( c->getShard() , r.getns() , o );
+               if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
+                   BSONObjBuilder b;
+                   b.appendOID( "_id" , 0 , true );
+                   b.appendElements( o );
+                   o = b.obj();
+                   bad = ! manager->hasShardKey( o );
+               }
-                       r.gotInsert();
-                       if ( r.getClientInfo()->autoSplitOk() )
-                           c->splitIfShould( o.objsize() );
-                       gotThrough = true;
-                       break;
-                   }
-                   catch ( StaleConfigException& ) {
-                       log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << o << endl;
-                       r.reset();
-                       manager = r.getChunkManager();
-                   }
-                   sleepmillis( i * 200 );
+               if ( bad ) {
+                   log() << "tried to insert object with no valid shard key: " << nsChunkLookup << " " << o << endl;
+                   uasserted( 14842 , "tried to insert object with no valid shard key" );
+               }
+
+           }
+
+           // Many operations benefit from having the shard key early in the object
+           o = manager->getShardKey().moveToFront(o);
+
+           const int maxTries = 30;
+
+           for ( int i=0; i<maxTries; i++ ) {
+               try {
+                   ChunkPtr c = manager->findChunk( o );
+                   LOG(4) << " server:" << c->getShard().toString() << " " << o << endl;
+                   insert( c->getShard() , ns , o , flags, safe);
+                   break;
                }
+               catch ( StaleConfigException& e ) {
+                   int logLevel = i < ( maxTries / 2 );
+                   LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
-               assert( gotThrough );
+                   unsigned long long old = manager->getSequenceNumber();
+                   manager = conf->getChunkManagerIfExists(ns);
+
+                   if (!manager) {
+                       uasserted(14843, "collection no longer sharded");
+                   }
+
+                   LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
+               }
+               sleepmillis( i * 20 );
+           }
+       }
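The safe parameter threaded through insert()/update() and insertSharded() above boils down to issuing the write and then asking the same connection for getLastError before handing it back. A hedged client-side equivalent with the C++ driver of this era; the header path, host, and namespace are assumptions for illustration:

#include <iostream>
#include <string>
#include "client/dbclient.h"   // driver header path assumed for this tree

void safeInsertExample() {
    mongo::DBClientConnection c;
    std::string errmsg;
    if (!c.connect("localhost:27017", errmsg)) {   // placeholder host
        std::cerr << "connect failed: " << errmsg << std::endl;
        return;
    }
    c.insert("test.foo", BSON("x" << 1));          // the write itself
    std::string err = c.getLastError();            // empty string means success
    if (!err.empty())
        std::cerr << "write failed: " << err << std::endl;
}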
@@ -186,16 +252,15 @@ namespace mongo {
            uassert( 13506 , "$atomic not supported sharded" , query["$atomic"].eoo() );
            uassert( 10201 , "invalid update" , d.moreJSObjs() );
            BSONObj toupdate = d.nextJsObj();
-           BSONObj chunkFinder = query;

            bool upsert = flags & UpdateOption_Upsert;
            bool multi = flags & UpdateOption_Multi;

            if (upsert) {
-               uassert(8012, "can't upsert something without shard key",
+               uassert(8012, "can't upsert something without valid shard key",
                        (manager->hasShardKey(toupdate) ||
-                        (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))));
+                        (toupdate.firstElementFieldName()[0] == '$' && manager->hasShardKey(query))));

                BSONObj key = manager->getShardKey().extractKey(query);
                BSONForEach(e, key) {
@@ -207,8 +272,9 @@ namespace mongo {
            if ( ! manager->hasShardKey( query ) ) {
                if ( multi ) {
                }
-               else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ) {
-                   throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
+               else if ( strcmp( query.firstElementFieldName() , "_id" ) || query.nFields() != 1 ) {
+                   log() << "Query " << query << endl;
+                   throw UserException( 8013 , "can't do non-multi update with query that doesn't have a valid shard key" );
                }
                else {
                    save = true;
@@ -218,7 +284,7 @@ namespace mongo {

            if ( ! save ) {
-               if ( toupdate.firstElement().fieldName()[0] == '$' ) {
+               if ( toupdate.firstElementFieldName()[0] == '$' ) {
                    BSONObjIterator ops(toupdate);
                    while(ops.more()) {
                        BSONElement op(ops.next());
@@ -241,7 +307,7 @@ namespace mongo {
                }
                else {
                    uasserted(12376,
-                             str::stream() << "shard key must be in update object for collection: " << manager->getns() );
+                             str::stream() << "valid shard key must be in update object for collection: " << manager->getns() );
                }
            }

@@ -268,10 +334,101 @@ namespace mongo {
                    if ( left <= 0 )
                        throw e;
                    left--;
-                   log() << "update failed b/c of StaleConfigException, retrying "
+                   log() << "update will be retried b/c sharding config info is stale, "
                          << " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
                    r.reset( false );
                    manager = r.getChunkManager();
+                   uassert(14806, "collection no longer sharded", manager);
+               }
+           }
+       }
+
+       void updateSharded( DBConfigPtr conf, const char* ns, BSONObj& query, BSONObj& toupdate, int flags, bool safe ) {
+           ChunkManagerPtr manager = conf->getChunkManager(ns);
+           BSONObj chunkFinder = query;
+
+           bool upsert = flags & UpdateOption_Upsert;
+           bool multi = flags & UpdateOption_Multi;
+
+           if (upsert) {
+               uassert(14854, "can't upsert something without valid shard key",
+                       (manager->hasShardKey(toupdate) ||
+                        (toupdate.firstElementFieldName()[0] == '$' && manager->hasShardKey(query))));
+
+               BSONObj key = manager->getShardKey().extractKey(query);
+               BSONForEach(e, key) {
+                   uassert(14855, "shard key in upsert query must be an exact match", getGtLtOp(e) == BSONObj::Equality);
+               }
+           }
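The upsert guard above insists that every shard-key field extracted from the query be a plain equality, since a range predicate cannot pin the upsert to a single chunk. A toy model of the check; the tree compares getGtLtOp(e) against BSONObj::Equality on real BSON elements:

#include <map>
#include <stdexcept>
#include <string>

enum MatchOp { Equality, GT, LT, GTE, LTE };   // stands in for BSONObj::MatchType

// Throws if any extracted shard-key field is not a simple equality match.
void checkUpsertKeyIsExact(const std::map<std::string, MatchOp>& extractedKey) {
    for (const auto& field : extractedKey)
        if (field.second != Equality)
            throw std::runtime_error(
                "shard key in upsert query must be an exact match");
}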
+
+           bool save = false;
+           if ( ! manager->hasShardKey( query ) ) {
+               if ( multi ) {
+               }
+               else if ( strcmp( query.firstElementFieldName() , "_id" ) || query.nFields() != 1 ) {
+                   throw UserException( 14850 , "can't do non-multi update with query that doesn't have a valid shard key" );
+               }
+               else {
+                   save = true;
+                   chunkFinder = toupdate;
+               }
+           }
+
+
+           if ( ! save ) {
+               if ( toupdate.firstElementFieldName()[0] == '$' ) {
+                   BSONObjIterator ops(toupdate);
+                   while(ops.more()) {
+                       BSONElement op(ops.next());
+                       if (op.type() != Object)
+                           continue;
+                       BSONObjIterator fields(op.embeddedObject());
+                       while(fields.more()) {
+                           const string field = fields.next().fieldName();
+                           uassert(14851,
+                                   str::stream() << "Can't modify shard key's value field " << field
+                                                 << " for collection: " << manager->getns(),
+                                   ! manager->getShardKey().partOfShardKey(field));
+                       }
+                   }
+               }
+               else if ( manager->hasShardKey( toupdate ) ) {
+                   uassert( 14856,
+                            str::stream() << "cannot modify shard key for collection: " << manager->getns(),
+                            manager->getShardKey().compare( query , toupdate ) == 0 );
+               }
+               else {
+                   uasserted(14857,
+                             str::stream() << "valid shard key must be in update object for collection: " << manager->getns() );
+               }
+           }
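The modifier walk above visits each top-level $-operator in the update document and then every field it touches, rejecting the update if any touched field belongs to the shard key. The same two nested loops, over toy containers instead of BSONObjIterator:

#include <map>
#include <set>
#include <stdexcept>
#include <string>

using ModifierMap = std::map<std::string, std::set<std::string>>;  // $op -> fields

void rejectShardKeyMods(const ModifierMap& mods,
                        const std::set<std::string>& shardKey) {
    for (const auto& op : mods)                 // each $set, $inc, ...
        for (const auto& field : op.second)     // each field the operator touches
            if (shardKey.count(field))
                throw std::runtime_error(
                    "Can't modify shard key's value field " + field);
}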
+
+           if ( multi ) {
+               set<Shard> shards;
+               manager->getShardsForQuery( shards , chunkFinder );
+//             int * x = (int*)(r.d().afterNS());
+//             x[0] |= UpdateOption_Broadcast;
+               for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++) {
+                   update(*i, ns, query, toupdate, flags, safe);
+               }
+           }
+           else {
+               int left = 5;
+               while ( true ) {
+                   try {
+                       ChunkPtr c = manager->findChunk( chunkFinder );
+                       update(c->getShard(), ns, query, toupdate, flags, safe);
+                       break;
+                   }
+                   catch ( StaleConfigException& e ) {
+                       if ( left <= 0 )
+                           throw e;
+                       left--;
+                       log() << "update will be retried b/c sharding config info is stale, "
+                             << " left:" << left << " ns: " << ns << " query: " << query << endl;
+                       manager = conf->getChunkManager(ns);
+                       uassert(14849, "collection no longer sharded", manager);
+                   }
+               }
+           }
+       }
@@ -293,7 +450,7 @@ namespace mongo {
            while ( true ) {
                try {
                    manager->getShardsForQuery( shards , pattern );
-                   log(2) << "delete : " << pattern << " \t " << shards.size() << " justOne: " << justOne << endl;
+                   LOG(2) << "delete : " << pattern << " \t " << shards.size() << " justOne: " << justOne << endl;
                    if ( shards.size() == 1 ) {
                        doWrite( dbDelete , r , *shards.begin() );
                        return;
@@ -309,6 +466,7 @@ namespace mongo {
                    r.reset( false );
                    shards.clear();
                    manager = r.getChunkManager();
+                   uassert(14805, "collection no longer sharded", manager);
                }
            }

@@ -324,7 +482,7 @@ namespace mongo {
        virtual void writeOp( int op , Request& r ) {
            const char *ns = r.getns();
-           log(3) << "write: " << ns << endl;
+           LOG(3) << "write: " << ns << endl;

            DbMessage& d = r.d();
            ChunkManagerPtr info = r.getChunkManager();
diff --git a/s/strategy_single.cpp b/s/strategy_single.cpp
index 3fd357a..012be5f 100644
--- a/s/strategy_single.cpp
+++ b/s/strategy_single.cpp
@@ -36,7 +36,7 @@ namespace mongo {
        virtual void queryOp( Request& r ) {
            QueryMessage q( r.d() );
-           log(3) << "single query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << endl;
+           LOG(3) << "single query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << " options : " << q.queryOptions << endl;

            if ( r.isCommand() ) {
@@ -47,7 +55,15 @@ namespace mongo {
                while ( true ) {
                    BSONObjBuilder builder;
                    try {
-                       bool ok = Command::runAgainstRegistered(q.ns, q.query, builder);
+                       BSONObj cmdObj = q.query;
+                       {
+                           BSONElement e = cmdObj.firstElement();
+                           if ( e.type() == Object && (e.fieldName()[0] == '$'
+                                                        ? str::equals("query", e.fieldName()+1)
+                                                        : str::equals("query", e.fieldName())))
+                               cmdObj = e.embeddedObject();
+                       }
+                       bool ok = Command::runAgainstRegistered(q.ns, cmdObj, builder, q.queryOptions);
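The unwrapping rule above, in isolation: a command can arrive wrapped as { query: {...} } or { $query: {...} }, in which case the embedded object is the real command to dispatch. A toy element type stands in for the BSONElement the tree inspects:

#include <string>

struct Element {
    std::string name;   // field name of the first element
    bool isObject;      // whether its value is a sub-object
};

// True when the first element wraps the real command object.
bool isWrappedCommand(const Element& first) {
    const std::string& n = first.name;
    return first.isObject && (n == "query" || n == "$query");
}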
                        if ( ok ) {
                            BSONObj x = builder.done();
                            replyToQuery(0, r.p(), r.m(), x);
@@ -73,7 +81,7 @@ namespace mongo {
                    }
                }

-               string commandName = q.query.firstElement().fieldName();
+               string commandName = q.query.firstElementFieldName();

                uassert(13390, "unrecognized command: " + commandName, _commandsSafeToPass.count(commandName) != 0);
            }
@@ -87,7 +95,10 @@ namespace mongo {
            LOG(3) << "single getmore: " << ns << endl;

            long long id = r.d().getInt64( 4 );
-
+
+           // we use ScopedDbConnection because we don't care about config versions
+           // not deleting data is handled elsewhere
+           // and we don't want to call setShardVersion
            ScopedDbConnection conn( cursorCache.getRef( id ) );

            Message response;
@@ -150,12 +161,12 @@ namespace mongo {
            if ( r.isShardingEnabled() &&
                 strstr( ns , ".system.indexes" ) == strchr( ns , '.' ) &&
                 strchr( ns , '.' ) ) {
-               log(1) << " .system.indexes write for: " << ns << endl;
+               LOG(1) << " .system.indexes write for: " << ns << endl;
                handleIndexWrite( op , r );
                return;
            }

-           log(3) << "single write: " << ns << endl;
+           LOG(3) << "single write: " << ns << endl;
            doWrite( op , r , r.primaryShard() );
            r.gotInsert(); // Won't handle multi-insert correctly. Not worth parsing the request.
        }
@@ -251,6 +262,14 @@ namespace mongo {
            return true;
        }

+       void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags, bool safe, const char* nsChunkLookup ) {
+           // only useful for shards
+       }
+
+       void updateSharded( DBConfigPtr conf, const char* ns, BSONObj& query, BSONObj& toupdate, int flags, bool safe ) {
+           // only useful for shards
+       }
+
        set<string> _commandsSafeToPass;
    };
@@ -129,7 +129,7 @@ namespace mongo {

        virtual ~StaleConfigException() throw() {}

-       virtual void appendPrefix( stringstream& ss ) const { ss << "StaleConfigException: "; }
+       virtual void appendPrefix( stringstream& ss ) const { ss << "stale sharding config exception: "; }

        bool justConnection() const { return _justConnection; }

diff --git a/s/writeback_listener.cpp b/s/writeback_listener.cpp
index df7cc35..5f320d3 100644
--- a/s/writeback_listener.cpp
+++ b/s/writeback_listener.cpp
@@ -40,7 +40,8 @@ namespace mongo {
    mongo::mutex WriteBackListener::_seenWritebacksLock("WriteBackListener::seen");

    WriteBackListener::WriteBackListener( const string& addr ) : _addr( addr ) {
-       log() << "creating WriteBackListener for: " << addr << endl;
+       _name = str::stream() << "WriteBackListener-" << addr;
+       log() << "creating WriteBackListener for: " << addr << " serverID: " << serverID << endl;
    }

    /* static */
@@ -88,16 +89,17 @@ namespace mongo {
    /* static */
    BSONObj WriteBackListener::waitFor( const ConnectionIdent& ident, const OID& oid ) {
        Timer t;
-       for ( int i=0; i<5000; i++ ) {
+       for ( int i=0; i<10000; i++ ) {
            {
                scoped_lock lk( _seenWritebacksLock );
                WBStatus s = _seenWritebacks[ident];
                if ( oid < s.id ) {
                    // this means we're waiting for a GLE that already passed.
-                   // it should be impossible becauseonce we call GLE, no other
+                   // it should be impossible because once we call GLE, no other
                    // writebacks should happen with that connection id
-                   msgasserted( 13633 , str::stream() << "got writeback waitfor for older id " <<
-                                " oid: " << oid << " s.id: " << s.id << " connection: " << ident.toString() );
+
+                   msgasserted( 14041 , str::stream() << "got writeback waitfor for older id " <<
+                                " oid: " << oid << " s.id: " << s.id << " ident: " << ident.toString() );
                }
                else if ( oid == s.id ) {
                    return s.gle;
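waitFor() above polls a shared status map under a mutex; because writeback ids only move forward per connection, finding a stored id larger than the requested one means the wanted result is already gone and waiting longer is pointless. A self-contained sketch with toy types:

#include <chrono>
#include <map>
#include <mutex>
#include <stdexcept>
#include <string>
#include <thread>

struct WBStatus { long long id = 0; std::string gle; };

std::mutex seenLock;
std::map<int, WBStatus> seen;   // connection id -> last seen status

std::string waitForWriteback(int connId, long long id) {
    for (int i = 0; i < 10000; ++i) {
        {
            std::lock_guard<std::mutex> lk(seenLock);
            const WBStatus& s = seen[connId];
            if (id < s.id)   // ids are monotonic: ours already passed
                throw std::runtime_error("waiting for an id that already passed");
            if (id == s.id)
                return s.gle;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    throw std::runtime_error("timed out waiting for writeback");
}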
@@ -115,7 +117,7 @@ namespace mongo {
        while ( ! inShutdown() ) {

            if ( ! Shard::isAShardNode( _addr ) ) {
-               log(1) << _addr << " is not a shard node" << endl;
+               LOG(1) << _addr << " is not a shard node" << endl;
                sleepsecs( 60 );
                continue;
            }
@@ -129,6 +131,7 @@ namespace mongo {
                    BSONObjBuilder cmd;
                    cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                    if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
+                       result = result.getOwned();
                        log() << "writebacklisten command failed! " << result << endl;
                        conn.done();
                        continue;
@@ -136,7 +139,7 @@ namespace mongo {
                }

-               log(1) << "writebacklisten result: " << result << endl;
+               LOG(1) << "writebacklisten result: " << result << endl;

                BSONObj data = result.getObjectField( "data" );
                if ( data.getBoolField( "writeBack" ) ) {
@@ -163,13 +166,12 @@ namespace mongo {
                    ShardChunkVersion needVersion( data["version"] );

                    LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
-                          << " mine : " << db->getChunkManager( ns )->getVersion().toString() << endl;// TODO change to log(3)
-
-                   if ( logLevel ) log(1) << debugString( m ) << endl;
+                          << " mine : " << db->getChunkManager( ns )->getVersion().toString()
+                          << endl;

-                   ShardChunkVersion start = db->getChunkManager( ns )->getVersion();
+                   LOG(1) << m.toString() << endl;

-                   if ( needVersion.isSet() && needVersion <= start ) {
+                   if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ) {
                        // this means when the write went originally, the version was old
                        // if we're here, it means we've already updated the config, so don't need to do again
                        //db->getChunkManager( ns , true ); // SERVER-1349
@@ -178,48 +180,60 @@ namespace mongo {
                        // we received a writeback object that was sent to a previous version of a shard
                        // the actual shard may not have the object the writeback operation is for
                        // we need to reload the chunk manager and get the new shard versions
-                       bool good = false;
-                       for ( int i=0; i<100; i++ ) {
-                           if ( db->getChunkManager( ns , true )->getVersion() >= needVersion ) {
-                               good = true;
-                               break;
-                           }
-                           log() << "writeback getChunkManager didn't update?" << endl;
-                           sleepmillis(10);
-                       }
-                       assert( good );
+                       db->getChunkManager( ns , true );
                    }

                    // do request and then call getLastError
                    // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError

                    BSONObj gle;
-                   try {
-
-                       Request r( m , 0 );
-                       r.init();
-
-                       ClientInfo * ci = r.getClientInfo();
-                       ci->noAutoSplit();
-
-                       r.process();
-
-                       ci->newRequest(); // this so we flip prev and cur shards
+                   int attempts = 0;
+                   while ( true ) {
+                       attempts++;
+
+                       try {
+
+                           Request r( m , 0 );
+                           r.init();
+
+                           r.d().reservedField() |= DbMessage::Reserved_FromWriteback;
+
+                           ClientInfo * ci = r.getClientInfo();
+                           if (!noauth) {
+                               ci->getAuthenticationInfo()->authorize("admin", internalSecurity.user);
+                           }
+                           ci->noAutoSplit();
+
+                           r.process();
+
+                           ci->newRequest(); // this so we flip prev and cur shards
+
+                           BSONObjBuilder b;
+                           if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
+                               b.appendBool( "commandFailed" , true );
+                           }
+                           gle = b.obj();
+
+                           if ( gle["code"].numberInt() == 9517 ) {
+                               log() << "writeback failed because of stale config, retrying attempts: " << attempts << endl;
+                               if( ! db->getChunkManagerIfExists( ns , true ) ){
+                                   uassert( 15884, str::stream() << "Could not reload chunk manager after " << attempts << " attempts.", attempts <= 4 );
+                                   sleepsecs( attempts - 1 );
+                               }
+                               continue;
+                           }

-                       BSONObjBuilder b;
-                       if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
-                           b.appendBool( "commandFailed" , true );
+                           ci->clearSinceLastGetError();
                        }
-                       gle = b.obj();
-
-                       ci->clearSinceLastGetError();
-                   }
-                   catch ( DBException& e ) {
-                       error() << "error processing writeback: " << e << endl;
-                       BSONObjBuilder b;
-                       b.append( "err" , e.toString() );
-                       e.getInfo().append( b );
-                       gle = b.obj();
+                       catch ( DBException& e ) {
+                           error() << "error processing writeback: " << e << endl;
+                           BSONObjBuilder b;
+                           b.append( "err" , e.toString() );
+                           e.getInfo().append( b );
+                           gle = b.obj();
+                       }
+
+                       break;
                    }

                    {
diff --git a/s/writeback_listener.h b/s/writeback_listener.h
index 0125073..1ef33da 100644
--- a/s/writeback_listener.h
+++ b/s/writeback_listener.h
@@ -31,7 +31,8 @@ namespace mongo {
    * (Wrong here in the sense that the target chunk moved before this mongos had a chance to
    * learn so.) It is responsible for reapplying these writes to the correct shard.
    *
-   * Currently, there is one listener per shard.
+   * Runs (instantiated) on mongos.
+   * Currently, there is one writebacklistener per shard.
    */
    class WriteBackListener : public BackgroundJob {
    public:
@@ -63,11 +64,12 @@ namespace mongo {
    protected:
        WriteBackListener( const string& addr );

-       string name() const { return "WriteBackListener"; }
+       string name() const { return _name; }
        void run();

    private:
        string _addr;
+       string _name;

        static mongo::mutex _cacheLock; // protects _cache
        static map<string,WriteBackListener*> _cache; // server to listener
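The retry policy added to writeback_listener.cpp above, condensed: reapply the write, inspect getLastError, and on the stale-config error code (9517) reload the chunk manager and go around again, sleeping longer after each failed reload and giving up after a few attempts. The two helpers below are hypothetical stand-ins for the mongos internals, stubbed so the sketch compiles:

#include <chrono>
#include <stdexcept>
#include <thread>

// Hypothetical stand-ins: in the real loop these are r.process() plus
// ci->getLastError(), and db->getChunkManagerIfExists( ns , true ).
int  applyWriteAndGetLastErrorCode() { return 0; }    // stub: 9517 would mean stale config
bool reloadChunkManager()            { return true; } // stub: false when reload fails

void processWritebackWithRetry() {
    int attempts = 0;
    while (true) {
        ++attempts;
        if (applyWriteAndGetLastErrorCode() != 9517)
            break;                              // success, or a non-retriable error
        if (!reloadChunkManager()) {
            if (attempts > 4)
                throw std::runtime_error("could not reload chunk manager");
            std::this_thread::sleep_for(std::chrono::seconds(attempts - 1));
        }
        // otherwise go around and reapply the write
    }
}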