Diffstat (limited to 's/commands_admin.cpp')
-rw-r--r--	s/commands_admin.cpp	746
1 file changed, 540 insertions(+), 206 deletions(-)
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 0b2baa0..685d020 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -25,38 +25,40 @@
_ secondary indexes
*/
-#include "stdafx.h"
+#include "pch.h"
#include "../util/message.h"
-#include "../db/dbmessage.h"
+#include "../util/processinfo.h"
+#include "../util/stringutils.h"
+
#include "../client/connpool.h"
+
+#include "../db/dbmessage.h"
#include "../db/commands.h"
+#include "../db/stats/counters.h"
#include "config.h"
#include "chunk.h"
+#include "grid.h"
#include "strategy.h"
+#include "stats.h"
namespace mongo {
- extern string ourHostname;
-
namespace dbgrid_cmds {
- set<string> dbgridCommands;
-
class GridAdminCmd : public Command {
public:
- GridAdminCmd( const char * n ) : Command( n ){
- dbgridCommands.insert( n );
+ GridAdminCmd( const char * n ) : Command( n , false, tolowerString(n).c_str() ){
}
- virtual bool slaveOk(){
+ virtual bool slaveOk() const {
return true;
}
- virtual bool adminOnly() {
+ virtual bool adminOnly() const {
return true;
}
// all grid commands are designed not to lock
- virtual LockType locktype(){ return NONE; }
+ virtual LockType locktype() const { return NONE; }
};
// --------------- misc commands ----------------------
@@ -67,64 +69,131 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << " shows status/reachability of servers in the cluster";
}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- result.append("configserver", configServer.getPrimary() );
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ result.append("configserver", configServer.getPrimary().getConnString() );
result.append("isdbgrid", 1);
return true;
}
} netstat;
-
- class ListGridCommands : public GridAdminCmd {
+
+ class ServerStatusCmd : public Command {
public:
- ListGridCommands() : GridAdminCmd("gridcommands") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ ServerStatusCmd() : Command( "serverStatus" , true ){
+ _started = time(0);
+ }
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ result.append("uptime",(double) (time(0)-_started));
+ result.appendDate( "localTime" , jsTime() );
- BSONObjBuilder arr;
- int num=0;
- for ( set<string>::iterator i = dbgridCommands.begin(); i != dbgridCommands.end(); i++ ){
- string s = BSONObjBuilder::numStr( num++ );
- arr.append( s.c_str() , *i );
+ {
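+                // report process memory usage (resident/virtual) when the platform can provide it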
+ BSONObjBuilder t( result.subobjStart( "mem" ) );
+
+ ProcessInfo p;
+ if ( p.supported() ){
+ t.appendNumber( "resident" , p.getResidentSize() );
+ t.appendNumber( "virtual" , p.getVirtualMemorySize() );
+ t.appendBool( "supported" , true );
+ }
+ else {
+ result.append( "note" , "not all mem info support on this platform" );
+ t.appendBool( "supported" , false );
+ }
+
+ t.done();
}
- result.appendArray( "commands" , arr.done() );
- return true;
+ {
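+                // connection tickets: currently in use vs. still available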
+ BSONObjBuilder bb( result.subobjStart( "connections" ) );
+ bb.append( "current" , connTicketHolder.used() );
+ bb.append( "available" , connTicketHolder.available() );
+ bb.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "extra_info" ) );
+ bb.append("note", "fields vary by platform");
+ ProcessInfo p;
+ p.getExtraInfo(bb);
+ bb.done();
+ }
+
+ result.append( "opcounters" , globalOpCounters.getObj() );
+ {
+ BSONObjBuilder bb( result.subobjStart( "ops" ) );
+ bb.append( "sharded" , opsSharded.getObj() );
+ bb.append( "notSharded" , opsNonSharded.getObj() );
+ bb.done();
+ }
+
+ result.append( "shardCursorType" , shardedCursorTypes.getObj() );
+
+ {
+ BSONObjBuilder asserts( result.subobjStart( "asserts" ) );
+ asserts.append( "regular" , assertionCount.regular );
+ asserts.append( "warning" , assertionCount.warning );
+ asserts.append( "msg" , assertionCount.msg );
+ asserts.append( "user" , assertionCount.user );
+ asserts.append( "rollovers" , assertionCount.rollovers );
+ asserts.done();
+ }
+
+ return 1;
}
- } listGridCommands;
- // ------------ database level commands -------------
+ time_t _started;
+ } cmdServerStatus;
- class ListDatabaseCommand : public GridAdminCmd {
+ class FsyncCommand : public GridAdminCmd {
public:
- ListDatabaseCommand() : GridAdminCmd("listdatabases") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- ScopedDbConnection conn( configServer.getPrimary() );
+ FsyncCommand() : GridAdminCmd( "fsync" ){}
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ if ( cmdObj["lock"].trueValue() ){
+ errmsg = "can't do lock through mongos";
+ return false;
+ }
+
+ BSONObjBuilder sub;
- auto_ptr<DBClientCursor> cursor = conn->query( "config.databases" , BSONObj() );
+ bool ok = true;
+ int numFiles = 0;
+
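+            // fan the fsync out to every shard and collect each shard's reply under "all"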
+ vector<Shard> shards;
+ Shard::getAllShards( shards );
+ for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ){
+ Shard s = *i;
- BSONObjBuilder list;
- int num = 0;
- while ( cursor->more() ){
- string s = BSONObjBuilder::numStr( num++ );
+ BSONObj x = s.runCommand( "admin" , "fsync" );
+ sub.append( s.getName() , x );
- BSONObj o = cursor->next();
- list.append( s.c_str() , o["name"].valuestrsafe() );
+ if ( ! x["ok"].trueValue() ){
+ ok = false;
+ errmsg = x["errmsg"].String();
+ }
+
+ numFiles += x["numFiles"].numberInt();
}
-
- result.appendArray("databases" , list.obj() );
- conn.done();
-
- return true;
+
+ result.append( "numFiles" , numFiles );
+ result.append( "all" , sub.obj() );
+ return ok;
}
- } gridListDatabase;
+ } fsyncCmd;
+
+ // ------------ database level commands -------------
class MoveDatabasePrimaryCommand : public GridAdminCmd {
public:
- MoveDatabasePrimaryCommand() : GridAdminCmd("moveprimary") { }
+ MoveDatabasePrimaryCommand() : GridAdminCmd("movePrimary") { }
virtual void help( stringstream& help ) const {
- help << " example: { moveprimary : 'foo' , to : 'localhost:9999' } TODO: locking? ";
+ help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }";
+ // TODO: locking?
}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string dbname = cmdObj["moveprimary"].valuestrsafe();
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string dbname = cmdObj.firstElement().valuestrsafe();
if ( dbname.size() == 0 ){
errmsg = "no db";
@@ -136,7 +205,7 @@ namespace mongo {
return false;
}
- DBConfig * config = grid.getDBConfig( dbname , false );
+ DBConfigPtr config = grid.getDBConfig( dbname , false );
if ( ! config ){
errmsg = "can't find db!";
return false;
@@ -147,71 +216,68 @@ namespace mongo {
errmsg = "you have to specify where you want to move it";
return false;
}
+ Shard s = Shard::make( to );
- if ( to == config->getPrimary() ){
+ if ( config->getPrimary() == s.getConnString() ){
errmsg = "thats already the primary";
return false;
}
- if ( ! grid.knowAboutShard( to ) ){
+ if ( ! grid.knowAboutShard( s.getConnString() ) ){
errmsg = "that server isn't known to me";
return false;
}
-
- ScopedDbConnection conn( configServer.getPrimary() );
-
- log() << "moving " << dbname << " primary from: " << config->getPrimary() << " to: " << to << endl;
+
+ log() << "movePrimary: moving " << dbname << " primary from: " << config->getPrimary().toString()
+ << " to: " << s.toString() << endl;
// TODO LOCKING: this is not safe with multiple mongos
+ ScopedDbConnection toconn( s.getConnString() );
- ScopedDbConnection toconn( to );
-
- // TODO AARON - we need a clone command which replays operations from clone start to now
- // using a seperate smaller oplog
+ // TODO ERH - we need a clone command which replays operations from clone start to now
+ // can just use local.oplog.$main
BSONObj cloneRes;
- bool worked = toconn->runCommand( dbname.c_str() , BSON( "clone" << config->getPrimary() ) , cloneRes );
+ bool worked = toconn->runCommand( dbname.c_str() , BSON( "clone" << config->getPrimary().getConnString() ) , cloneRes );
toconn.done();
+
if ( ! worked ){
log() << "clone failed" << cloneRes << endl;
errmsg = "clone failed";
- conn.done();
return false;
}
ScopedDbConnection fromconn( config->getPrimary() );
- config->setPrimary( to );
- config->save( true );
+ config->setPrimary( s.getConnString() );
- log() << " dropping " << dbname << " from old" << endl;
+ log() << "movePrimary: dropping " << dbname << " from old" << endl;
fromconn->dropDatabase( dbname.c_str() );
fromconn.done();
- result << "primary" << to;
+ result << "primary " << s.toString();
- conn.done();
return true;
}
} movePrimary;
class EnableShardingCmd : public GridAdminCmd {
public:
- EnableShardingCmd() : GridAdminCmd( "enablesharding" ){}
+ EnableShardingCmd() : GridAdminCmd( "enableSharding" ){}
virtual void help( stringstream& help ) const {
help
<< "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n"
<< " { enablesharding : \"<dbname>\" }\n";
}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string dbname = cmdObj["enablesharding"].valuestrsafe();
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string dbname = cmdObj.firstElement().valuestrsafe();
if ( dbname.size() == 0 ){
errmsg = "no db";
return false;
}
- DBConfig * config = grid.getDBConfig( dbname );
+ DBConfigPtr config = grid.getDBConfig( dbname );
if ( config->isShardingEnabled() ){
errmsg = "already enabled";
return false;
@@ -220,7 +286,6 @@ namespace mongo {
log() << "enabling sharding on: " << dbname << endl;
config->enableSharding();
- config->save( true );
return true;
}
@@ -230,20 +295,22 @@ namespace mongo {
class ShardCollectionCmd : public GridAdminCmd {
public:
- ShardCollectionCmd() : GridAdminCmd( "shardcollection" ){}
+ ShardCollectionCmd() : GridAdminCmd( "shardCollection" ){}
+
virtual void help( stringstream& help ) const {
help
<< "Shard a collection. Requires key. Optional unique. Sharding must already be enabled for the database.\n"
<< " { enablesharding : \"<dbname>\" }\n";
}
- bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string ns = cmdObj["shardcollection"].valuestrsafe();
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "no ns";
return false;
}
- DBConfig * config = grid.getDBConfig( ns );
+ DBConfigPtr config = grid.getDBConfig( ns );
if ( ! config->isShardingEnabled() ){
errmsg = "sharding not enabled for db";
return false;
@@ -258,23 +325,60 @@ namespace mongo {
if ( key.isEmpty() ){
errmsg = "no shard key";
return false;
- } else if (key.nFields() > 1){
- errmsg = "compound shard keys not supported yet";
- return false;
+ }
+
+ BSONForEach(e, key){
+ if (!e.isNumber() || e.number() != 1.0){
+ errmsg = "shard keys must all be ascending";
+ return false;
+ }
}
if ( ns.find( ".system." ) != string::npos ){
errmsg = "can't shard system namespaces";
return false;
}
-
+
+ // Sharding interacts with indexing in at least two ways:
+ //
+        // 1. A unique index must have the sharding key as its prefix. Otherwise maintaining uniqueness would
+ // require coordinated access to all shards. Trying to shard a collection with such an index is not
+ // allowed.
+ //
+        // 2. Sharding a collection requires an index over the sharding key. That index must be created upfront.
+ // The rationale is that sharding a non-empty collection would need to create the index and that could
+ // be slow. Requiring the index upfront allows the admin to plan before sharding and perhaps use
+ // background index construction. One exception to the rule: empty collections. It's fairly easy to
+ // create the index as part of the sharding process.
+ //
+ // We enforce both these conditions in what comes next.
+
{
+ ShardKeyPattern proposedKey( key );
+ bool hasShardIndex = false;
+
ScopedDbConnection conn( config->getPrimary() );
BSONObjBuilder b;
b.append( "ns" , ns );
- b.appendBool( "unique" , true );
- if ( conn->count( config->getName() + ".system.indexes" , b.obj() ) ){
- errmsg = "can't shard collection with unique indexes";
+
+ auto_ptr<DBClientCursor> cursor = conn->query( config->getName() + ".system.indexes" , b.obj() );
+ while ( cursor->more() ){
+ BSONObj idx = cursor->next();
+
+ // Is index key over the sharding key? Remember that.
+ if ( key.woCompare( idx["key"].embeddedObjectUserCheck() ) == 0 ){
+ hasShardIndex = true;
+ }
+
+ // Not a unique index? Move on.
+ if ( idx["unique"].eoo() || ! idx["unique"].Bool() )
+ continue;
+
+ // Shard key is prefix of unique index? Move on.
+ if ( proposedKey.isPrefixOf( idx["key"].embeddedObjectUserCheck() ) )
+ continue;
+
+ errmsg = (string)"can't shard collection with unique index on: " + idx.toString();
conn.done();
return false;
}
@@ -286,13 +390,17 @@ namespace mongo {
return false;
}
+ if ( ! hasShardIndex && ( conn->count( ns ) != 0 ) ){
+ errmsg = "please create an index over the sharding key before sharding.";
+ return false;
+ }
+
conn.done();
}
-
- log() << "CMD: shardcollection: " << cmdObj << endl;
+
+ tlog() << "CMD: shardcollection: " << cmdObj << endl;
config->shardCollection( ns , key , cmdObj["unique"].trueValue() );
- config->save( true );
result << "collectionsharded" << ns;
return true;
@@ -306,26 +414,26 @@ namespace mongo {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
- bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string ns = cmdObj["getShardVersion"].valuestrsafe();
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "need to speciy fully namespace";
return false;
}
- DBConfig * config = grid.getDBConfig( ns );
+ DBConfigPtr config = grid.getDBConfig( ns );
if ( ! config->isSharded( ns ) ){
errmsg = "ns not sharded.";
return false;
}
- ChunkManager * cm = config->getChunkManager( ns );
+ ChunkManagerPtr cm = config->getChunkManager( ns );
if ( ! cm ){
errmsg = "no chunk manager?";
return false;
}
-
- result.appendTimestamp( "version" , cm->getVersion() );
+ cm->_printChunks();
+ result.appendTimestamp( "version" , cm->getVersion().toLong() );
return 1;
}
@@ -336,22 +444,24 @@ namespace mongo {
SplitCollectionHelper( const char * name ) : GridAdminCmd( name ) , _name( name ){}
virtual void help( stringstream& help ) const {
help
- << " example: { shard : 'alleyinsider.blog.posts' , find : { ts : 1 } } - split the shard that contains give key \n"
- << " example: { shard : 'alleyinsider.blog.posts' , middle : { ts : 1 } } - split the shard that contains the key with this as the middle \n"
+ << " example: { split : 'alleyinsider.blog.posts' , find : { ts : 1 } } - split the shard that contains give key \n"
+ << " example: { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } } - split the shard that contains the key with this as the middle \n"
<< " NOTE: this does not move move the chunks, it merely creates a logical seperation \n"
;
}
- virtual bool _split( BSONObjBuilder& result , string&errmsg , const string& ns , ChunkManager * manager , Chunk& old , BSONObj middle ) = 0;
+ virtual bool _split( BSONObjBuilder& result , string&errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ) = 0;
- bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string ns = cmdObj[_name.c_str()].valuestrsafe();
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ ShardConnection::sync();
+
+ string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "no ns";
return false;
}
- DBConfig * config = grid.getDBConfig( ns );
+ DBConfigPtr config = grid.getDBConfig( ns );
if ( ! config->isSharded( ns ) ){
errmsg = "ns not sharded. have to shard before can split";
return false;
@@ -367,8 +477,8 @@ namespace mongo {
}
}
- ChunkManager * info = config->getChunkManager( ns );
- Chunk& old = info->findChunk( find );
+ ChunkManagerPtr info = config->getChunkManager( ns );
+ ChunkPtr old = info->findChunk( find );
return _split( result , errmsg , ns , info , old , cmdObj.getObjectField( "middle" ) );
}
@@ -379,15 +489,15 @@ namespace mongo {
class SplitValueCommand : public SplitCollectionHelper {
public:
- SplitValueCommand() : SplitCollectionHelper( "splitvalue" ){}
- virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManager * manager , Chunk& old , BSONObj middle ){
+ SplitValueCommand() : SplitCollectionHelper( "splitValue" ){}
+ virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ){
- result << "shardinfo" << old.toString();
+ result << "shardinfo" << old->toString();
result.appendBool( "auto" , middle.isEmpty() );
if ( middle.isEmpty() )
- middle = old.pickSplitPoint();
+ middle = old->pickSplitPoint();
result.append( "middle" , middle );
@@ -400,14 +510,17 @@ namespace mongo {
class SplitCollection : public SplitCollectionHelper {
public:
SplitCollection() : SplitCollectionHelper( "split" ){}
- virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManager * manager , Chunk& old , BSONObj middle ){
-
+ virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ){
+ assert( old.get() );
log() << "splitting: " << ns << " shard: " << old << endl;
if ( middle.isEmpty() )
- old.split();
- else
- old.split( middle );
+ old->split();
+ else {
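+                // multiSplit takes a vector of split points, so wrap the single requested middle key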
+ vector<BSONObj> splitPoints;
+ splitPoints.push_back( middle );
+ old->multiSplit( splitPoints );
+ }
return true;
}
@@ -417,18 +530,21 @@ namespace mongo {
class MoveChunkCmd : public GridAdminCmd {
public:
- MoveChunkCmd() : GridAdminCmd( "movechunk" ){}
+ MoveChunkCmd() : GridAdminCmd( "moveChunk" ){}
virtual void help( stringstream& help ) const {
help << "{ movechunk : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }";
}
- bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string ns = cmdObj["movechunk"].valuestrsafe();
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ ShardConnection::sync();
+
+ Timer t;
+ string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "no ns";
return false;
}
- DBConfig * config = grid.getDBConfig( ns );
+ DBConfigPtr config = grid.getDBConfig( ns );
if ( ! config->isSharded( ns ) ){
errmsg = "ns not sharded. have to shard before can move a chunk";
return false;
@@ -440,31 +556,29 @@ namespace mongo {
return false;
}
- string to = cmdObj["to"].valuestrsafe();
- if ( ! to.size() ){
+ string toString = cmdObj["to"].valuestrsafe();
+ if ( ! toString.size() ){
errmsg = "you have to specify where you want to move the chunk";
return false;
}
- log() << "CMD: movechunk: " << cmdObj << endl;
+ Shard to = Shard::make( toString );
+
+ tlog() << "CMD: movechunk: " << cmdObj << endl;
- ChunkManager * info = config->getChunkManager( ns );
- Chunk& c = info->findChunk( find );
- string from = c.getShard();
+ ChunkManagerPtr info = config->getChunkManager( ns );
+ ChunkPtr c = info->findChunk( find );
+ const Shard& from = c->getShard();
if ( from == to ){
errmsg = "that chunk is already on that shard";
return false;
}
-
- if ( ! grid.knowAboutShard( to ) ){
- errmsg = "that shard isn't known to me";
- return false;
- }
-
- if ( ! c.moveAndCommit( to , errmsg ) )
+
+ if ( ! c->moveAndCommit( to , errmsg ) )
return false;
+ result.append( "millis" , t.millis() );
return true;
}
} moveChunkCmd;
@@ -473,11 +587,11 @@ namespace mongo {
class ListShardsCmd : public GridAdminCmd {
public:
- ListShardsCmd() : GridAdminCmd("listshards") { }
+ ListShardsCmd() : GridAdminCmd("listShards") { }
virtual void help( stringstream& help ) const {
help << "list all shards of the system";
}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
ScopedDbConnection conn( configServer.getPrimary() );
vector<BSONObj> all;
@@ -497,86 +611,124 @@ namespace mongo {
/* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage partition. */
class AddShard : public GridAdminCmd {
public:
- AddShard() : GridAdminCmd("addshard") { }
+ AddShard() : GridAdminCmd("addShard") { }
virtual void help( stringstream& help ) const {
help << "add a new shard to the system";
}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- ScopedDbConnection conn( configServer.getPrimary() );
-
-
- string host = cmdObj["addshard"].valuestrsafe();
-
- if ( host == "localhost" || host.find( "localhost:" ) == 0 ||
- host == "127.0.0.1" || host.find( "127.0.0.1:" ) == 0 ){
- if ( ! cmdObj["allowLocal"].trueValue() ){
- errmsg =
- "can't use localhost as a shard since all shards need to communicate. "
- "allowLocal to override for testing";
- return false;
- }
- }
-
- if ( host.find( ":" ) == string::npos ){
- stringstream ss;
- ss << host << ":" << CmdLine::ShardServerPort;
- host = ss.str();
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ HostAndPort shardAddr( cmdObj.firstElement().valuestrsafe() );
+ if ( shardAddr.isLocalHost() != grid.allowLocalHost() ){
+ errmsg = "can't use localhost as a shard since all shards need to communicate. "
+ "either use all shards and configdbs in localhost or all in actual IPs " ;
+ log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl;
+ return false;
}
- BSONObj shard;
- {
- BSONObjBuilder b;
- b.append( "host" , host );
- if ( cmdObj["maxSize"].isNumber() )
- b.append( cmdObj["maxSize"] );
- shard = b.obj();
+ if ( ! shardAddr.hasPort() ){
+ shardAddr.setPort( CmdLine::ShardServerPort );
}
- BSONObj old = conn->findOne( "config.shards" , shard );
- if ( ! old.isEmpty() ){
- result.append( "msg" , "already exists" );
- conn.done();
- return false;
- }
+ string name = "";
+ if ( cmdObj["name"].type() == String ) {
+ name = cmdObj["name"].valuestrsafe();
+ }
- try {
- ScopedDbConnection newShardConn( host );
- newShardConn->getLastError();
- newShardConn.done();
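+            // maxSize (if given) caps how much data the cluster will place on this shard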
+ long long maxSize = 0;
+ if ( cmdObj[ ShardFields::maxSize.name() ].isNumber() ){
+ maxSize = cmdObj[ ShardFields::maxSize.name() ].numberLong();
}
- catch ( DBException& e ){
- errmsg = "couldn't connect to new shard";
- result.append( "host" , host );
- result.append( "exception" , e.what() );
- conn.done();
+
+ if ( ! grid.addShard( &name , shardAddr.toString() , maxSize , errmsg ) ){
+ log() << "addshard request " << cmdObj << " failed: " << errmsg << endl;
return false;
}
-
-
- conn->insert( "config.shards" , shard );
- result.append( "added" , shard["host"].valuestrsafe() );
- conn.done();
+ result << "shardAdded" << name;
return true;
}
+
} addServer;
+ /* See usage docs at:
+ * http://www.mongodb.org/display/DOCS/Configuring+Sharding#ConfiguringSharding-Removingashard
+ */
class RemoveShardCmd : public GridAdminCmd {
public:
- RemoveShardCmd() : GridAdminCmd("removeshard") { }
+ RemoveShardCmd() : GridAdminCmd("removeShard") { }
virtual void help( stringstream& help ) const {
- help << "remove a shard to the system.\nshard must be empty or command will return an error.";
+ help << "remove a shard to the system.";
}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- if ( 1 ){
- errmsg = "removeshard not yet implemented";
- return 0;
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ string target = cmdObj.firstElement().valuestrsafe();
+ Shard s = Shard::make( target );
+ if ( ! grid.knowAboutShard( s.getConnString() ) ){
+ errmsg = "unknown shard";
+ return false;
}
ScopedDbConnection conn( configServer.getPrimary() );
- BSONObj server = BSON( "host" << cmdObj["removeshard"].valuestrsafe() );
- conn->remove( "config.shards" , server );
+ // If the server is not yet draining chunks, put it in draining mode.
+ BSONObj searchDoc = BSON( "_id" << s.getName() );
+ BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
+ BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
+ if ( shardDoc.isEmpty() ){
+
+            // TODO: prevent moving chunks to this shard.
+
+ log() << "going to start draining shard: " << s.getName() << endl;
+ BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
+ conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);
+
+ errmsg = conn->getLastError();
+ if ( errmsg.size() ){
+ log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
+ return false;
+ }
+
+ Shard::reloadShardInfo();
+
+ result.append( "msg" , "draining started successfully" );
+ result.append( "state" , "started" );
+ result.append( "shard" , s.getName() );
+ conn.done();
+ return true;
+ }
+
+ // If the server has been completely drained, remove it from the ConfigDB.
+ // Check not only for chunks but also databases.
+ BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
+ long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
+ BSONObj primaryDoc = BSON( "primary" << shardDoc[ "_id" ].str() );
+ long long dbCount = conn->count( "config.databases" , primaryDoc );
+ if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ){
+ log() << "going to remove shard: " << s.getName() << endl;
+ conn->remove( "config.shards" , searchDoc );
+
+ errmsg = conn->getLastError();
+ if ( errmsg.size() ){
+ log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
+ return false;
+ }
+
+ Shard::removeShard( shardDoc[ "_id" ].str() );
+ Shard::reloadShardInfo();
+
+ result.append( "msg" , "removeshard completed successfully" );
+ result.append( "state" , "completed" );
+ result.append( "shard" , s.getName() );
+ conn.done();
+ return true;
+ }
+
+ // If the server is already in draining mode, just report on its progress.
+ // Report on databases (not just chunks) that are left too.
+ result.append( "msg" , "draining ongoing" );
+ result.append( "state" , "ongoing" );
+ BSONObjBuilder inner;
+ inner.append( "chunks" , chunkCount );
+ inner.append( "dbs" , dbCount );
+ result.append( "remaining" , inner.obj() );
conn.done();
return true;
@@ -588,48 +740,71 @@ namespace mongo {
class IsDbGridCmd : public Command {
public:
- virtual LockType locktype(){ return NONE; }
- virtual bool slaveOk() {
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const {
return true;
}
IsDbGridCmd() : Command("isdbgrid") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
result.append("isdbgrid", 1);
- result.append("hostname", ourHostname);
+ result.append("hostname", getHostNameCached());
return true;
}
} isdbgrid;
class CmdIsMaster : public Command {
public:
- virtual LockType locktype(){ return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool slaveOk() {
+ virtual bool slaveOk() const {
return true;
}
virtual void help( stringstream& help ) const {
help << "test if this is master half of a replica pair";
}
CmdIsMaster() : Command("ismaster") { }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
result.append("ismaster", 1.0 );
result.append("msg", "isdbgrid");
return true;
}
} ismaster;
+ class CmdWhatsMyUri : public Command {
+ public:
+ CmdWhatsMyUri() : Command("whatsmyuri") { }
+ virtual bool logTheOp() {
+ return false; // the modification will be logged directly
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() {
+ return false;
+ }
+ virtual void help( stringstream &help ) const {
+ help << "{whatsmyuri:1}";
+ }
+ virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ result << "you" << ClientInfo::get()->getRemote();
+ return true;
+ }
+ } cmdWhatsMyUri;
+
+
class CmdShardingGetPrevError : public Command {
public:
- virtual LockType locktype(){ return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool slaveOk() {
+ virtual bool slaveOk() const {
return true;
}
virtual void help( stringstream& help ) const {
help << "get previous error (since last reseterror command)";
}
- CmdShardingGetPrevError() : Command("getpreverror") { }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ CmdShardingGetPrevError() : Command( "getPrevError" , false , "getpreverror") { }
+ virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
errmsg += "getpreverror not supported for sharded environments";
return false;
}
@@ -637,20 +812,41 @@ namespace mongo {
class CmdShardingGetLastError : public Command {
public:
- virtual LockType locktype(){ return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool slaveOk() {
+ virtual bool slaveOk() const {
return true;
}
virtual void help( stringstream& help ) const {
help << "check for an error on the last command executed";
}
- CmdShardingGetLastError() : Command("getlasterror") { }
- virtual bool run(const char *nsraw, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string dbName = nsraw;
- dbName = dbName.substr( 0 , dbName.size() - 5 );
+ CmdShardingGetLastError() : Command("getLastError" , false , "getlasterror") { }
+
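+        // writeback ids returned by shards are collected and waited on before returning to the client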
+ void addWriteBack( vector<OID>& all , const BSONObj& o ){
+ BSONElement e = o["writeback"];
+
+ if ( e.type() == jstOID )
+ all.push_back( e.OID() );
+ }
+
+ void handleWriteBacks( vector<OID>& all ){
+ if ( all.size() == 0 )
+ return;
- DBConfig * conf = grid.getDBConfig( dbName , false );
+ for ( unsigned i=0; i<all.size(); i++ ){
+ waitForWriteback( all[i] );
+ }
+ }
+
+ virtual bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ LastError *le = lastError.disableForCommand();
+ {
+ assert( le );
+ if ( le->msg.size() && le->nPrev == 1 ){
+ le->appendSelf( result );
+ return true;
+ }
+ }
ClientInfo * client = ClientInfo::get();
set<string> * shards = client->getPrev();
@@ -659,30 +855,79 @@ namespace mongo {
result.appendNull( "err" );
return true;
}
+
+ //log() << "getlasterror enter: " << shards->size() << endl;
+
+
+ vector<OID> writebacks;
+ // handle single server
if ( shards->size() == 1 ){
string theShard = *(shards->begin() );
result.append( "theshard" , theShard.c_str() );
- ScopedDbConnection conn( theShard );
+ ShardConnection conn( theShard , "" );
BSONObj res;
- bool ok = conn->runCommand( conf->getName() , cmdObj , res );
+ bool ok = conn->runCommand( dbName , cmdObj , res );
+ //log() << "\t" << res << endl;
result.appendElements( res );
conn.done();
+ result.append( "singleShard" , theShard );
+ addWriteBack( writebacks , res );
+
+ // hit other machines just to block
+ for ( set<string>::const_iterator i=client->sinceLastGetError().begin(); i!=client->sinceLastGetError().end(); ++i ){
+ string temp = *i;
+ if ( temp == theShard )
+ continue;
+
+ ShardConnection conn( temp , "" );
+ addWriteBack( writebacks , conn->getLastErrorDetailed() );
+ conn.done();
+ }
+ client->clearSinceLastGetError();
+ handleWriteBacks( writebacks );
return ok;
}
+ BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
+
+ long long n = 0;
+
+ // hit each shard
vector<string> errors;
for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ){
string theShard = *i;
- ScopedDbConnection conn( theShard );
- string temp = conn->getLastError();
- if ( temp.size() )
+ bbb.append( theShard );
+ ShardConnection conn( theShard , "" );
+ BSONObj res;
+ bool ok = conn->runCommand( dbName , cmdObj , res );
+ addWriteBack( writebacks, res );
+ string temp = DBClientWithCommands::getLastErrorString( res );
+ if ( ok == false || temp.size() )
errors.push_back( temp );
+ n += res["n"].numberLong();
conn.done();
}
+ bbb.done();
+
+ result.appendNumber( "n" , n );
+
+ // hit other machines just to block
+ for ( set<string>::const_iterator i=client->sinceLastGetError().begin(); i!=client->sinceLastGetError().end(); ++i ){
+ string temp = *i;
+ if ( shards->count( temp ) )
+ continue;
+
+ ShardConnection conn( temp , "" );
+ addWriteBack( writebacks, conn->getLastErrorDetailed() );
+ conn.done();
+ }
+ client->clearSinceLastGetError();
+
if ( errors.size() == 0 ){
result.appendNull( "err" );
+ handleWriteBacks( writebacks );
return true;
}
@@ -690,13 +935,102 @@ namespace mongo {
BSONObjBuilder all;
for ( unsigned i=0; i<errors.size(); i++ ){
- all.append( all.numStr( i ).c_str() , errors[i].c_str() );
+ all.append( all.numStr( i ) , errors[i].c_str() );
}
result.appendArray( "errs" , all.obj() );
+ handleWriteBacks( writebacks );
return true;
}
} cmdGetLastError;
}
+
+ class CmdListDatabases : public Command {
+ public:
+ CmdListDatabases() : Command("listDatabases", false , "listdatabases" ) {}
+
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool slaveOverrideOk() { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const { help << "list databases on cluster"; }
+
+ bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ vector<Shard> shards;
+ Shard::getAllShards( shards );
+
+ map<string,long long> sizes;
+ map< string,shared_ptr<BSONObjBuilder> > dbShardInfo;
+
+ for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ){
+ Shard s = *i;
+ BSONObj x = s.runCommand( "admin" , "listDatabases" );
+
+ BSONObjIterator j( x["databases"].Obj() );
+ while ( j.more() ){
+ BSONObj theDB = j.next().Obj();
+
+ string name = theDB["name"].String();
+ long long size = theDB["sizeOnDisk"].numberLong();
+
+ long long& totalSize = sizes[name];
+ if ( size == 1 ){
+ if ( totalSize <= 1 )
+ totalSize = 1;
+ }
+ else
+ totalSize += size;
+
+ shared_ptr<BSONObjBuilder>& bb = dbShardInfo[name];
+ if ( ! bb.get() )
+ bb.reset( new BSONObjBuilder() );
+ bb->appendNumber( s.getName() , size );
+ }
+
+ }
+
+ long long totalSize = 0;
+
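+            // merge the per-shard figures into one entry per database, with a per-shard size breakdown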
+ BSONArrayBuilder bb( result.subarrayStart( "databases" ) );
+ for ( map<string,long long>::iterator i=sizes.begin(); i!=sizes.end(); ++i ){
+ string name = i->first;
+ long long size = i->second;
+ totalSize += size;
+
+ BSONObjBuilder temp;
+ temp.append( "name" , name );
+ temp.appendNumber( "size" , size );
+ temp.appendBool( "empty" , size == 1 );
+ temp.append( "shards" , dbShardInfo[name]->obj() );
+
+ bb.append( temp.obj() );
+ }
+ bb.done();
+
+ result.appendNumber( "totalSize" , totalSize );
+ result.appendNumber( "totalSizeMb" , totalSize / ( 1024 * 1024 ) );
+
+ return 1;
+ }
+
+ } cmdListDatabases;
+
+ class CmdCloseAllDatabases : public Command {
+ public:
+ CmdCloseAllDatabases() : Command("closeAllDatabases", false , "closeAllDatabases" ) {}
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool slaveOverrideOk() { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const { help << "Not supported sharded"; }
+
+ bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) {
+ errmsg = "closeAllDatabases isn't supported through mongos";
+ return false;
+ }
+ } cmdCloseAllDatabases;
+
} // namespace mongo