Diffstat (limited to 's/config.cpp')
-rw-r--r-- | s/config.cpp | 97
1 file changed, 65 insertions, 32 deletions
diff --git a/s/config.cpp b/s/config.cpp
index 645e923..0f5aaf1 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -150,7 +150,30 @@ namespace mongo {
             _save();
         }
 
-        return getChunkManager(ns,true,true);
+        ChunkManagerPtr manager = getChunkManager(ns,true,true);
+
+        // Tell the primary mongod to refresh its data
+        // TODO: Think the real fix here is for mongos to just assume all collections sharded, when we get there
+        for( int i = 0; i < 4; i++ ){
+            if( i == 3 ){
+                warning() << "too many tries updating initial version of " << ns << " on shard primary " << getPrimary() <<
+                             ", other mongoses may not see the collection as sharded immediately" << endl;
+                break;
+            }
+            try {
+                ShardConnection conn( getPrimary(), ns );
+                conn.setVersion();
+                conn.done();
+                break;
+            }
+            catch( DBException& e ){
+                warning() << "could not update initial version of " << ns << " on shard primary " << getPrimary() <<
+                             causedBy( e ) << endl;
+            }
+            sleepsecs( i );
+        }
+
+        return manager;
     }
 
     bool DBConfig::removeSharding( const string& ns ) {
@@ -192,14 +215,13 @@ namespace mongo {
         {
             scoped_lock lk( _lock );
 
-            CollectionInfo& ci = _collections[ns];
-
-            bool earlyReload = ! ci.isSharded() && ( shouldReload || forceReload );
+            bool earlyReload = ! _collections[ns].isSharded() && ( shouldReload || forceReload );
             if ( earlyReload ) {
                 // this is to catch cases where there this is a new sharded collection
                 _reload();
-                ci = _collections[ns];
             }
+
+            CollectionInfo& ci = _collections[ns];
 
             massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() );
             assert( ! ci.key().isEmpty() );
@@ -710,42 +732,53 @@ namespace mongo {
         set<string> got;
 
         ScopedDbConnection conn( _primary, 30.0 );
-        auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
-        assert( c.get() );
-        while ( c->more() ) {
-            BSONObj o = c->next();
-            string name = o["_id"].valuestrsafe();
-            got.insert( name );
-            if ( name == "chunksize" ) {
-                LOG(1) << "MaxChunkSize: " << o["value"] << endl;
-                Chunk::MaxChunkSize = o["value"].numberInt() * 1024 * 1024;
-            }
-            else if ( name == "balancer" ) {
-                // ones we ignore here
-            }
-            else {
-                log() << "warning: unknown setting [" << name << "]" << endl;
-            }
-        }
-        if ( ! got.count( "chunksize" ) ) {
-            conn->insert( ShardNS::settings , BSON( "_id" << "chunksize" <<
-                          "value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
-        }
+        try {
+            auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
+            assert( c.get() );
+            while ( c->more() ) {
+
+                BSONObj o = c->next();
+                string name = o["_id"].valuestrsafe();
+                got.insert( name );
+                if ( name == "chunksize" ) {
+                    int csize = o["value"].numberInt();
+
+                    // validate chunksize before proceeding
+                    if ( csize == 0 ) {
+                        // setting was not modified; mark as such
+                        got.erase(name);
+                        log() << "warning: invalid chunksize (" << csize << ") ignored" << endl;
+                    } else {
+                        LOG(1) << "MaxChunkSize: " << csize << endl;
+                        Chunk::MaxChunkSize = csize * 1024 * 1024;
+                    }
+                }
+                else if ( name == "balancer" ) {
+                    // ones we ignore here
+                }
+                else {
+                    log() << "warning: unknown setting [" << name << "]" << endl;
+                }
+            }
 
-        // indexes
-        try {
+            if ( ! got.count( "chunksize" ) ) {
+                conn->insert( ShardNS::settings , BSON( "_id" << "chunksize" <<
+                              "value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
+            }
+
+            // indexes
             conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "min" << 1 ) , true );
             conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "shard" << 1 << "min" << 1 ) , true );
             conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "lastmod" << 1 ) , true );
             conn->ensureIndex( ShardNS::shard , BSON( "host" << 1 ) , true );
+
+            conn.done();
         }
-        catch ( std::exception& e ) {
-            log( LL_WARNING ) << "couldn't create indexes on config db: " << e.what() << endl;
+        catch ( DBException& e ) {
+            warning() << "couldn't load settings or create indexes on config db: " << e.what() << endl;
         }
-
-        conn.done();
     }
 
     string ConfigServer::getHost( string name , bool withPort ) {
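The retry loop in the first hunk is a small bounded-backoff pattern: attempt the setVersion() call a few times, warn and sleep between failures, and finally give up with a warning rather than failing enableSharding() outright. Below is a minimal standalone sketch of that pattern, not MongoDB source; retryWithBackoff and its parameters are hypothetical names used only for illustration.

#include <chrono>
#include <functional>
#include <iostream>
#include <stdexcept>
#include <thread>

// Run op() up to maxAttempts times. After a failure, log a warning and sleep
// i seconds (mirroring sleepsecs( i ) in the patch, so the first retry is
// immediate and later ones back off). Give up with a final warning instead of
// propagating the error to the caller.
bool retryWithBackoff( const std::function<void()>& op, int maxAttempts = 3 ) {
    for ( int i = 0; i <= maxAttempts; i++ ) {
        if ( i == maxAttempts ) {
            std::cerr << "warning: giving up after " << maxAttempts << " attempts" << std::endl;
            return false;
        }
        try {
            op();          // e.g. open a connection to the primary and push the new version
            return true;   // success: stop retrying
        }
        catch ( const std::exception& e ) {
            std::cerr << "warning: attempt " << i << " failed: " << e.what() << std::endl;
        }
        std::this_thread::sleep_for( std::chrono::seconds( i ) );
    }
    return false;
}

In the patch itself the retried operation is opening a ShardConnection to getPrimary() and calling setVersion(); the sketch takes the operation as a callable only to keep the retry policy separate from the sharding-specific code.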