Diffstat (limited to 's')
-rw-r--r--   s/balance.cpp          6
-rw-r--r--   s/chunk.cpp            2
-rw-r--r--   s/commands_public.cpp  6
-rw-r--r--   s/config.cpp           7
-rw-r--r--   s/d_migrate.cpp       35
-rw-r--r--   s/grid.cpp             4
6 files changed, 42 insertions, 18 deletions
diff --git a/s/balance.cpp b/s/balance.cpp
index f79e1d8..33cafdf 100644
--- a/s/balance.cpp
+++ b/s/balance.cpp
@@ -54,13 +54,13 @@ namespace mongo {
             const BSONObj& chunkToMove = chunkInfo.chunk;
             ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
-            if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) ){
+            if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
                 // likely a split happened somewhere
-                cm = cfg->getChunkManager( chunkInfo.ns , true );
+                cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
                 assert( cm );
 
                 c = cm->findChunk( chunkToMove["min"].Obj() );
-                if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) ){
+                if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
                     log() << "chunk mismatch after reload, ignoring will retry issue cm: " << c->getMin() << " min: " << chunkToMove["min"].Obj() << endl;
                     continue;
diff --git a/s/chunk.cpp b/s/chunk.cpp
index cf1f992..87d7747 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -843,7 +843,6 @@ namespace mongo {
         _chunkMap.clear();
         _chunkRanges.clear();
         _shards.clear();
-
         // delete data from mongod
         for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ){
@@ -872,7 +871,6 @@ namespace mongo {
             conn.done();
         }
-
         log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
         configServer.logChange( "dropCollection" , _ns , BSONObj() );
     }
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index 91563d2..80d5cc9 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -183,7 +183,7 @@ namespace mongo {
         class DBStatsCmd : public RunOnAllShardsCommand {
         public:
-            DBStatsCmd() : RunOnAllShardsCommand("dbstats") {}
+            DBStatsCmd() : RunOnAllShardsCommand("dbStats", "dbstats") {}
 
             virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {
                 long long objects = 0;
@@ -438,7 +438,7 @@ namespace mongo {
         class CollectionStats : public PublicGridCommand {
         public:
-            CollectionStats() : PublicGridCommand("collstats") { }
+            CollectionStats() : PublicGridCommand("collStats", "collstats") { }
 
             bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
                 string collection = cmdObj.firstElement().valuestrsafe();
                 string fullns = dbName + "." + collection;
@@ -517,7 +517,7 @@ namespace mongo {
         class FindAndModifyCmd : public PublicGridCommand {
        public:
-            FindAndModifyCmd() : PublicGridCommand("findandmodify") { }
+            FindAndModifyCmd() : PublicGridCommand("findAndModify", "findandmodify") { }
 
             bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
                 string collection = cmdObj.firstElement().valuestrsafe();
                 string fullns = dbName + "." + collection;
diff --git a/s/config.cpp b/s/config.cpp
index e1016a0..1ad15d5 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -62,6 +62,7 @@ namespace mongo {
     void DBConfig::CollectionInfo::shard( DBConfig * db , const string& ns , const ShardKeyPattern& key , bool unique ){
         _cm.reset( new ChunkManager( db, ns , key , unique ) );
         _dirty = true;
+        _dropped = false;
     }
 
     void DBConfig::CollectionInfo::unshard(){
@@ -81,10 +82,12 @@ namespace mongo {
         _cm->getInfo( val );
 
         conn->update( ShardNS::collection , key , val.obj() , true );
+        string err = conn->getLastError();
+        uassert( 13473 , (string)"failed to save collection (" + ns + "): " + err , err.size() == 0 );
+
         _dirty = false;
     }
-
     bool DBConfig::isSharded( const string& ns ){
         if ( ! _shardingEnabled )
             return false;
@@ -124,7 +127,7 @@ namespace mongo {
         scoped_lock lk( _lock );
 
         CollectionInfo& ci = _collections[ns];
-        uassert( 8043 , "already sharded" , ! ci.isSharded() );
+        uassert( 8043 , "collection already sharded" , ! ci.isSharded() );
 
         log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index b8ee78e..8e9584c 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -49,9 +49,11 @@ namespace mongo {
     class MoveTimingHelper {
     public:
-        MoveTimingHelper( const string& where , const string& ns )
+        MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max )
            : _where( where ) , _ns( ns ){
            _next = 1;
+            _b.append( "min" , min );
+            _b.append( "max" , max );
         }
 
         ~MoveTimingHelper(){
@@ -100,9 +102,11 @@ namespace mongo {
             log() << "moveChunk deleted: " << num << endl;
         }
     };
+
+    static const char * const cleanUpThreadName = "cleanupOldData";
 
     void _cleanupOldData( OldDataCleanup cleanup ){
-        Client::initThread( "cleanupOldData");
+        Client::initThread( cleanUpThreadName );
         log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
 
         int loops = 0;
@@ -240,6 +244,14 @@ namespace mongo {
             switch ( opstr[0] ){
 
             case 'd': {
+
+                if ( getThreadName() == cleanUpThreadName ){
+                    // we don't want to xfer things we're cleaning
+                    // as then they'll be deleted on TO
+                    // which is bad
+                    return;
+                }
+
                 // can't filter deletes :(
                 scoped_lock lk( _mutex );
                 _deleted.push_back( ide.wrap() );
@@ -267,7 +279,7 @@ namespace mongo {
         }
 
         void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ){
-            static long long maxSize = 1024 * 1024;
+            const long long maxSize = 1024 * 1024;
 
             if ( l->size() == 0 || size > maxSize )
                 return;
@@ -437,7 +449,7 @@ namespace mongo {
             configServer.init( configdb );
         }
 
-        MoveTimingHelper timing( "from" , ns );
+        MoveTimingHelper timing( "from" , ns , min , max );
 
         Shard fromShard( from );
         Shard toShard( to );
@@ -702,13 +714,13 @@ namespace mongo {
         }
 
         void _go(){
-            MoveTimingHelper timing( "to" , ns );
-
             assert( active );
             assert( state == READY );
             assert( ! min.isEmpty() );
             assert( ! max.isEmpty() );
 
+            MoveTimingHelper timing( "to" , ns , min , max );
+
             ScopedDbConnection conn( from );
             conn->getLastError(); // just test connection
@@ -841,6 +853,17 @@ namespace mongo {
                     BSONObjIterator i( xfer["deleted"].Obj() );
                     while ( i.more() ){
                         BSONObj id = i.next().Obj();
+
+                        // do not apply deletes if they do not belong to the chunk being migrated
+                        BSONObj fullObj;
+                        if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
+                            if ( ! isInRange( fullObj , min , max ) ) {
+                                log() << "not applying out of range deletion: " << fullObj << endl;
+
+                                continue;
+                            }
+                        }
+
                         Helpers::removeRange( ns , id , id, false , true , cmdLine.moveParanoia ? &rs : 0 );
                         didAnything = true;
                     }
diff --git a/s/grid.cpp b/s/grid.cpp
--- a/s/grid.cpp
+++ b/s/grid.cpp
@@ -242,8 +242,8 @@ namespace mongo {
             DBConfigPtr config = getDBConfig( *it , false );
             if ( config.get() != NULL ){
                 ostringstream ss;
-                ss << "trying to add shard " << servers.toString() << " because local database " << *it;
-                ss << " exists in another " << config->getPrimary().toString();
+                ss << "can't add shard " << servers.toString() << " because a local database '" << *it;
+                ss << "' exists in another " << config->getPrimary().toString();
                 errMsg = ss.str();
                 return false;
             }
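
Note on the new delete filter in s/d_migrate.cpp: a delete forwarded during migration is only applied on the receiving side if the document still falls inside the migrating chunk's half-open range [min, max). A minimal illustrative sketch of that kind of boundary check, assuming mongo's BSONObj/woCompare ordering and that the shard-key portion of the document has already been extracted (the helper name is hypothetical, not the actual isInRange implementation):

    #include "../db/jsobj.h"   // BSONObj, woCompare; include path is illustrative

    // True when 'key' lies in [min, max): inclusive of the chunk's min,
    // exclusive of its max, matching chunk boundary semantics.
    static bool keyInChunkRange( const mongo::BSONObj& key ,
                                 const mongo::BSONObj& min ,
                                 const mongo::BSONObj& max ){
        return key.woCompare( min ) >= 0 && key.woCompare( max ) < 0;
    }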