Diffstat (limited to 's/config.cpp')
-rw-r--r--  s/config.cpp | 80
1 file changed, 51 insertions(+), 29 deletions(-)
diff --git a/s/config.cpp b/s/config.cpp
index 23475eb..645e923 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -143,25 +143,14 @@ namespace mongo {
log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
- // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first
- // time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining
- // chunks will be created as a by-product of splitting.
ci.shard( ns , fieldsAndOrder , unique );
ChunkManagerPtr cm = ci.getCM();
uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) );
- cm->createFirstChunk( getPrimary() );
+ cm->createFirstChunks( getPrimary() );
_save();
}
- try {
- getChunkManager(ns, true)->maybeChunkCollection();
- }
- catch ( UserException& e ) {
- // failure to chunk is not critical enough to abort the command (and undo the _save()'d configDB state)
- log() << "couldn't chunk recently created collection: " << ns << " " << e << endl;
- }
-
- return getChunkManager(ns);
+ return getChunkManager(ns,true,true);
}
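
The hunk above renames createFirstChunk() to createFirstChunks() and drops the best-effort maybeChunkCollection() call, whose failure was only logged, in favor of returning a force-reloaded chunk manager. A minimal sketch of the new call shape (surrounding setup assumed; not part of the patch):

    // Both reload flags set: the cache cannot hand back a stale,
    // chunk-less snapshot taken before the first chunks were written.
    ChunkManagerPtr cm = conf->getChunkManager( ns,
                                                true /* shouldReload */,
                                                true /* forceReload */ );
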
bool DBConfig::removeSharding( const string& ns ) {
@@ -185,9 +174,9 @@ namespace mongo {
return true;
}
- ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns, bool shouldReload ){
+ ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns, bool shouldReload, bool forceReload ){
try{
- return getChunkManager( ns, shouldReload );
+ return getChunkManager( ns, shouldReload, forceReload );
}
catch( AssertionException& e ){
warning() << "chunk manager not found for " << ns << causedBy( e ) << endl;
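
The IfExists variant simply threads the new forceReload flag through and keeps its swallow-and-warn behavior; the return after the warning sits in elided context, but presumably yields an empty pointer. A hedged reconstruction of the whole wrapper:

    // Sketch: convert the throwing getter into a null-returning probe.
    ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns,
                                                       bool shouldReload,
                                                       bool forceReload ) {
        try {
            return getChunkManager( ns, shouldReload, forceReload );
        }
        catch ( AssertionException& e ) {
            warning() << "chunk manager not found for " << ns << causedBy( e ) << endl;
            return ChunkManagerPtr();  // assumed: empty pointer signals absence
        }
    }
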
@@ -195,7 +184,7 @@ namespace mongo {
}
}
- ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ) {
+ ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload, bool forceReload ) {
BSONObj key;
bool unique;
ShardChunkVersion oldVersion;
@@ -205,7 +194,7 @@ namespace mongo {
CollectionInfo& ci = _collections[ns];
- bool earlyReload = ! ci.isSharded() && shouldReload;
+ bool earlyReload = ! ci.isSharded() && ( shouldReload || forceReload );
if ( earlyReload ) {
// this is to catch cases where this is a new sharded collection
_reload();
@@ -214,7 +203,7 @@ namespace mongo {
massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() );
assert( ! ci.key().isEmpty() );
- if ( ! shouldReload || earlyReload )
+ if ( ! ( shouldReload || forceReload ) || earlyReload )
return ci.getCM();
key = ci.key().copy();
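
The widened guard means a reload now happens when either flag asks for one; unpacked, the early return reads (illustration only):

    bool wantReload = shouldReload || forceReload;  // any reload requested?
    if ( ! wantReload || earlyReload )
        return ci.getCM();  // serve the cache: nothing was requested, or
                            // the early _reload() already refreshed it
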
@@ -225,10 +214,11 @@ namespace mongo {
assert( ! key.isEmpty() );
- if ( oldVersion > 0 ) {
+ BSONObj newest;
+ if ( oldVersion > 0 && ! forceReload ) {
ScopedDbConnection conn( configServer.modelServer() , 30.0 );
- BSONObj newest = conn->findOne( ShardNS::chunk ,
- Query( BSON( "ns" << ns ) ).sort( "lastmod" , -1 ) );
+ newest = conn->findOne( ShardNS::chunk ,
+ Query( BSON( "ns" << ns ) ).sort( "lastmod" , -1 ) );
conn.done();
if ( ! newest.isEmpty() ) {
@@ -240,16 +230,41 @@ namespace mongo {
return ci.getCM();
}
}
-
+
+ }
+ else if( oldVersion == 0 ){
+ warning() << "version 0 found when " << ( forceReload ? "reloading" : "checking" ) << " chunk manager"
+ << ", collection '" << ns << "' initially detected as sharded" << endl;
}
// we are not locked now, and want to load a new ChunkManager
- auto_ptr<ChunkManager> temp( new ChunkManager( ns , key , unique ) );
- if ( temp->numChunks() == 0 ) {
- // maybe we're not sharded any more
- reload(); // this is a full reload
- return getChunkManager( ns , false );
+ auto_ptr<ChunkManager> temp;
+
+ {
+ scoped_lock lll ( _hitConfigServerLock );
+
+ if ( ! newest.isEmpty() && ! forceReload ) {
+ // if we have a target we're going for
+ // see if we've hit already
+
+ scoped_lock lk( _lock );
+ CollectionInfo& ci = _collections[ns];
+ if ( ci.isSharded() && ci.getCM() ) {
+ ShardChunkVersion currentVersion = newest["lastmod"];
+ if ( currentVersion == ci.getCM()->getVersion() ) {
+ return ci.getCM();
+ }
+ }
+
+ }
+
+ temp.reset( new ChunkManager( ns , key , unique ) );
+ if ( temp->numChunks() == 0 ) {
+ // maybe we're not sharded any more
+ reload(); // this is a full reload
+ return getChunkManager( ns , false );
+ }
}
scoped_lock lk( _lock );
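
The new block serializes the expensive rebuild behind _hitConfigServerLock and re-checks the cache once inside, so threads queued behind an in-flight reload can reuse its result instead of each hitting the config server. A condensed sketch of that double-checked shape (simplified types, boost mutexes assumed as in mongos of this era; not the literal code):

    #include <boost/thread/mutex.hpp>

    class ManagerCache {
        boost::mutex _hitConfigServerLock;  // one rebuild at a time
        boost::mutex _lock;                 // guards _version
        long long    _version;              // stands in for the cached CM version
    public:
        ManagerCache() : _version( 0 ) {}

        long long load( long long newest, bool forceReload ) {
            boost::mutex::scoped_lock lll( _hitConfigServerLock );
            if ( newest > 0 && ! forceReload ) {
                // waiters re-check: the thread ahead of us may have
                // already refreshed the cache to the newest version
                boost::mutex::scoped_lock lk( _lock );
                if ( _version == newest )
                    return _version;
            }
            // expensive path (config server scan elided), still serialized
            boost::mutex::scoped_lock lk( _lock );
            _version = newest;
            return _version;
        }
    };
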
@@ -257,8 +272,15 @@ namespace mongo {
CollectionInfo& ci = _collections[ns];
massert( 14822 , (string)"state changed in the middle: " + ns , ci.isSharded() );
- if ( temp->getVersion() > ci.getCM()->getVersion() ) {
- // we only want to reset if we're newer
+ bool forced = false;
+ if ( temp->getVersion() > ci.getCM()->getVersion() ||
+ (forced = (temp->getVersion() == ci.getCM()->getVersion() && forceReload ) ) ) {
+
+ if( forced ){
+ warning() << "chunk manager reload forced for collection '" << ns << "', config version is " << temp->getVersion() << endl;
+ }
+
+ // we only want to reset if we're newer or equal and forced
// otherwise we go into a bad cycle
ci.resetCM( temp.release() );
}
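
The final hunk widens the reset condition: the rebuilt manager replaces the cached one when it is strictly newer, or when the versions are equal and the caller forced the reload; anything else is refused so concurrent reloaders cannot ping-pong resets (the "bad cycle" the comment warns about). The guard, isolated as a hypothetical helper:

    // Illustrative only; mirrors the condition in the hunk above.
    bool shouldResetCM( unsigned long long newV,
                        unsigned long long curV,
                        bool forceReload ) {
        if ( newV > curV ) return true;          // genuinely newer snapshot
        return ( newV == curV ) && forceReload;  // forced refresh, same version
    }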