author     Antonin Kral <a.kral@bobek.cz>             2011-12-15 09:35:47 +0100
committer  Antonin Kral <a.kral@bobek.cz>             2011-12-15 09:35:47 +0100
commit     f0d9a01bccdaeb466c12c92057914bbfef59526c (patch)
tree       7679efa1f0daf7d1d906882a15dc77af6b7aef32 /shell
parent     5d342a758c6095b4d30aba0750b54f13b8916f51 (diff)
download   mongodb-f0d9a01bccdaeb466c12c92057914bbfef59526c.tar.gz
Imported Upstream version 2.0.2
Diffstat (limited to 'shell')
-rw-r--r--  shell/collection.js      | 188
-rw-r--r--  shell/dbshell.cpp        |   5
-rw-r--r--  shell/mongo_vstudio.cpp  |   4
-rwxr-xr-x  shell/servers.js         |  20
-rw-r--r--  shell/utils.js           |   4
-rw-r--r--  shell/utils_sh.js        |  16
6 files changed, 226 insertions, 11 deletions
diff --git a/shell/collection.js b/shell/collection.js
index 1e6fe03..cb7035d 100644
--- a/shell/collection.js
+++ b/shell/collection.js
@@ -62,6 +62,7 @@ DBCollection.prototype.help = function () {
     print("\tdb." + shortName + ".update(query, object[, upsert_bool, multi_bool])");
     print("\tdb." + shortName + ".validate( <full> ) - SLOW");;
     print("\tdb." + shortName + ".getShardVersion() - only for use with sharding");
+    print("\tdb." + shortName + ".getShardDistribution() - prints statistics about data distribution in the cluster");
     return __magicNoPrint;
 }
 
@@ -654,3 +655,190 @@ DBCollection.autocomplete = function(obj){
     }
     return ret;
 }
+
+
+// Sharding additions
+
+/*
+Usage :
+
+mongo <mongos>
+> load('path-to-file/shardingAdditions.js')
+Loading custom sharding extensions...
+true
+
+> var collection = db.getMongo().getCollection("foo.bar")
+> collection.getShardDistribution() // prints statistics related to the collection's data distribution
+
+> collection.getSplitKeysForChunks() // generates split points for all chunks in the collection, based on the
+                                     // default maxChunkSize or alternately a specified chunk size
+> collection.getSplitKeysForChunks( 10 ) // Mb
+
+> var splitter = collection.getSplitKeysForChunks() // by default, the chunks are not split, the keys are just
+                                                    // found. A splitter function is returned which will actually
+                                                    // do the splits.
+
+> splitter() // ! Actually executes the splits on the cluster !
+
+*/
+
+DBCollection.prototype.getShardDistribution = function(){
+
+    var stats = this.stats()
+
+    if( ! stats.sharded ){
+        print( "Collection " + this + " is not sharded." )
+        return
+    }
+
+    var config = this.getMongo().getDB("config")
+
+    var numChunks = 0
+
+    for( var shard in stats.shards ){
+
+        var shardDoc = config.shards.findOne({ _id : shard })
+
+        print( "\nShard " + shard + " at " + shardDoc.host )
+
+        var shardStats = stats.shards[ shard ]
+
+        var chunks = config.chunks.find({ _id : sh._collRE( coll ), shard : shard }).toArray()
+
+        numChunks += chunks.length
+
+        var estChunkData = shardStats.size / chunks.length
+        var estChunkCount = Math.floor( shardStats.count / chunks.length )
+
+        print( " data : " + sh._dataFormat( shardStats.size ) +
+               " docs : " + shardStats.count +
+               " chunks : " + chunks.length )
+        print( " estimated data per chunk : " + sh._dataFormat( estChunkData ) )
+        print( " estimated docs per chunk : " + estChunkCount )
+
+    }
+
+    print( "\nTotals" )
+    print( " data : " + sh._dataFormat( stats.size ) +
+           " docs : " + stats.count +
+           " chunks : " + numChunks )
+    for( var shard in stats.shards ){
+
+        var shardStats = stats.shards[ shard ]
+
+        var estDataPercent = Math.floor( shardStats.size / stats.size * 10000 ) / 100
+        var estDocPercent = Math.floor( shardStats.count / stats.count * 10000 ) / 100
+
+        print( " Shard " + shard + " contains " + estDataPercent + "% data, " + estDocPercent + "% docs in cluster, " +
+               "avg obj size on shard : " + sh._dataFormat( stats.shards[ shard ].avgObjSize ) )
+    }
+
+    print( "\n" )
+
+}
+
+// In testing phase, use with caution
+DBCollection.prototype._getSplitKeysForChunks = function( chunkSize ){
+
+    var stats = this.stats()
+
+    if( ! stats.sharded ){
+        print( "Collection " + this + " is not sharded." )
+        return
+    }
+
+    var config = this.getMongo().getDB("config")
+
+    if( ! chunkSize ){
+        chunkSize = config.settings.findOne({ _id : "chunksize" }).value
+        print( "Chunk size not set, using default of " + chunkSize + "Mb" )
+    }
+    else{
+        print( "Using chunk size of " + chunkSize + "Mb" )
+    }
+
+    var shardDocs = config.shards.find().toArray()
+
+    var allSplitPoints = {}
+    var numSplits = 0
+
+    for( var i = 0; i < shardDocs.length; i++ ){
+
+        var shardDoc = shardDocs[i]
+        var shard = shardDoc._id
+        var host = shardDoc.host
+        var sconn = new Mongo( host )
+
+        var chunks = config.chunks.find({ _id : sh._collRE( this ), shard : shard }).toArray()
+
+        print( "\nGetting split points for chunks on shard " + shard + " at " + host )
+
+        var splitPoints = []
+
+        for( var j = 0; j < chunks.length; j++ ){
+            var chunk = chunks[j]
+            var result = sconn.getDB("admin").runCommand({ splitVector : this + "", min : chunk.min, max : chunk.max, maxChunkSize : chunkSize })
+            if( ! result.ok ){
+                print( " Had trouble getting split keys for chunk " + sh._pchunk( chunk ) + " :\n" )
+                printjson( result )
+            }
+            else{
+                splitPoints = splitPoints.concat( result.splitKeys )
+
+                if( result.splitKeys.length > 0 )
+                    print( " Added " + result.splitKeys.length + " split points for chunk " + sh._pchunk( chunk ) )
+            }
+        }
+
+        print( "Total splits for shard " + shard + " : " + splitPoints.length )
+
+        numSplits += splitPoints.length
+        allSplitPoints[ shard ] = splitPoints
+
+    }
+
+    // Get most recent migration
+    var migration = config.changelog.find({ what : /^move.*/ }).sort({ time : -1 }).limit( 1 ).toArray()
+    if( migration.length == 0 )
+        print( "\nNo migrations found in changelog." )
+    else {
+        migration = migration[0]
+        print( "\nMost recent migration activity was on " + migration.ns + " at " + migration.time )
+    }
+
+    var admin = this.getMongo().getDB("admin")
+    var coll = this
+    var splitFunction = function(){
+
+        // Turn off the balancer, just to be safe
+        print( "Turning off balancer..." )
+        config.settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true )
+        print( "Sleeping for 30s to allow balancers to detect change.  To be extra safe, check config.changelog" +
+               " for recent migrations." )
+        sleep( 30000 )
+
+        for( shard in allSplitPoints ){
+            for( var i = 0; i < allSplitPoints[ shard ].length; i++ ){
+                var splitKey = allSplitPoints[ shard ][i]
+                print( "Splitting at " + tojson( splitKey ) )
+                printjson( admin.runCommand({ split : coll + "", middle : splitKey }) )
+            }
+        }
+
+        print( "Turning the balancer back on." )
+        config.settings.update({ _id : "balancer" }, { $set : { stopped : false } } )
+        sleep( 1 )
+    }
+
+    print( "\nGenerated " + numSplits + " split keys, run output function to perform splits.\n" +
+           " ex : \n" +
+           "  > var splitter = <collection>.getSplitKeysForChunks()\n" +
+           "  > splitter() // Execute splits on cluster !\n" )
+
+    return splitFunction
+
+}
+
+
+
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index f3122c7..443973f 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -398,13 +398,14 @@ string finishCode( string code ) {
     while ( ! isBalanced( code ) ) {
         inMultiLine = 1;
         code += "\n";
+        // cancel multiline if two blank lines are entered
+        if ( code.find("\n\n\n") != string::npos )
+            return ";";
         char * line = shellReadline("... " , 1 );
         if ( gotInterrupted )
             return "";
         if ( ! line )
             return "";
-        if ( code.find("\n\n") != string::npos ) // cancel multiline if two blank lines are entered
-            return ";";
         while (startsWith(line, "... "))
             line += 4;
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index 5496ddb..208d734 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -1005,8 +1005,8 @@ const StringData _jscode_raw_utils =
 "return {}\n"
 "}\n"
 "\n"
-"testLog = function(x){\n"
-"print( jsTestFile() + \" - \" + x )\n"
+"jsTestLog = function(msg){\n"
+"print( \"\\n\\n----\\n\" + msg + \"\\n----\\n\\n\" )\n"
 "}\n"
 "\n"
 "shellPrintHelper = function (x) {\n"
diff --git a/shell/servers.js b/shell/servers.js
index ad3b5eb..efbd9b6 100755
--- a/shell/servers.js
+++ b/shell/servers.js
@@ -235,7 +235,8 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
             rs.awaitReplication();
             var xxx = new Mongo( rs.getURL() );
             xxx.name = rs.getURL();
-            this._connections.push( xxx );
+            this._connections.push( xxx )
+            this["shard" + i] = xxx
         }
 
         this._configServers = []
@@ -260,6 +261,7 @@ ShardingTest = function( testName , numShards , verboseLevel , numMongos , other
             var conn = startMongodTest( 30000 + i , testName + i, 0, options );
             this._alldbpaths.push( testName +i )
             this._connections.push( conn );
+            this["shard" + i] = conn
         }
 
         if ( otherParams.sync ){
@@ -544,7 +546,7 @@ printShardingStatus = function( configDB , verbose ){
                 output( "\t" + tojsononeline(db,"",true) );
 
                 if (db.partitioned){
-                    configDB.collections.find( { _id : new RegExp( "^" + db._id + "\." ) } ).sort( { _id : 1 } ).forEach(
+                    configDB.collections.find( { _id : new RegExp( "^" + db._id + "\\." ) } ).sort( { _id : 1 } ).forEach(
                         function( coll ){
                             if ( coll.dropped == false ){
                                 output("\t\t" + coll._id + " chunks:");
@@ -760,8 +762,8 @@ ShardingTest.prototype.isSharded = function( collName ){
 
 ShardingTest.prototype.shardGo = function( collName , key , split , move , dbName ){
 
-    split = split || key;
-    move = move || split;
+    split = ( split != false ? ( split || key ) : split )
+    move = ( split != false && move != false ? ( move || split ) : false )
 
     if( collName.getDB )
         dbName = "" + collName.getDB()
@@ -782,12 +784,16 @@ ShardingTest.prototype.shardGo = function( collName , key , split , move , dbNam
         assert( false )
     }
 
+    if( split == false ) return
+
     result = this.s.adminCommand( { split : c , middle : split } );
     if( ! result.ok ){
         printjson( result )
         assert( false )
     }
 
+    if( move == false ) return
+
     var result = null
     for( var i = 0; i < 5; i++ ){
         result = this.s.adminCommand( { movechunk : c , find : move , to : this.getOther( this.getServer( dbName ) ).name } );
@@ -1854,7 +1860,11 @@ ReplSetTest.prototype.waitForIndicator = function( node, states, ind, timeout ){
             printjson( status )
             lastTime = new Date().getTime()
         }
-        
+
+        if (typeof status.members == 'undefined') {
+            return false;
+        }
+
         for( var i = 0; i < status.members.length; i++ ){
             if( status.members[i].name == node.host ){
                 for( var j = 0; j < states.length; j++ ){
diff --git a/shell/utils.js b/shell/utils.js
index 8380607..7d7a23b 100644
--- a/shell/utils.js
+++ b/shell/utils.js
@@ -1000,8 +1000,8 @@ jsTestOptions = function(){
     return {}
 }
 
-testLog = function(x){
-    print( jsTestFile() + " - " + x )
+jsTestLog = function(msg){
+    print( "\n\n----\n" + msg + "\n----\n\n" )
 }
 
 shellPrintHelper = function (x) {
diff --git a/shell/utils_sh.js b/shell/utils_sh.js
index 5bd449b..297643f 100644
--- a/shell/utils_sh.js
+++ b/shell/utils_sh.js
@@ -23,6 +23,22 @@ sh._adminCommand = function( cmd , skipCheck ) {
     return res;
 }
 
+
+sh._dataFormat = function( bytes ){
+    if( bytes < 1024 ) return Math.floor( bytes ) + "b"
+    if( bytes < 1024 * 1024 ) return Math.floor( bytes / 1024 ) + "kb"
+    if( bytes < 1024 * 1024 * 1024 ) return Math.floor( ( Math.floor( bytes / 1024 ) / 1024 ) * 100 ) / 100 + "Mb"
+    return Math.floor( ( Math.floor( bytes / ( 1024 * 1024 ) ) / 1024 ) * 100 ) / 100 + "Gb"
+}
+
+sh._collRE = function( coll ){
+    return RegExp( "^" + (coll + "").replace(/\./g, "\\.") + "-.*" )
+}
+
+sh._pchunk = function( chunk ){
+    return "[" + tojson( chunk.min ) + " -> " + tojson( chunk.max ) + "]"
+}
+
 sh.help = function() {
     print( "\tsh.addShard( host )                       server:port OR setname/server:port" )
     print( "\tsh.enableSharding(dbname)                 enables sharding on the database dbname" )
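
The sharding helpers added to collection.js are meant to be driven interactively from a mongos. The session below restates the Usage comment from the patch as a concrete sequence; the namespace test.users and the 10 Mb chunk size are illustrative only, and since the split-key helper is actually installed as _getSplitKeysForChunks (leading underscore, still "in testing phase"), that name is used here. Note also that getShardDistribution as imported calls sh._collRE( coll ), and coll is not defined inside that function, so it appears to depend on a coll variable already existing in the shell.

    // connect to a mongos and pick a sharded collection (names are illustrative)
    var collection = db.getSiblingDB( "test" ).getCollection( "users" )

    // per-shard data size, document count, chunk count and per-chunk estimates
    collection.getShardDistribution()

    // compute split points for every chunk against a 10Mb target chunk size,
    // without splitting anything yet; a splitter function is returned
    var splitter = collection._getSplitKeysForChunks( 10 )

    // only once the proposed splits look reasonable: stops the balancer,
    // runs the splits through mongos, then re-enables the balancer
    splitter()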
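
The new utils_sh.js helpers are small pure functions, so their behaviour follows directly from the arithmetic in the patch; a few spot checks, with the expected return values in the comments:

    sh._dataFormat( 512 )                       // "512b"
    sh._dataFormat( 1536 )                      // "1kb"  (floor of 1536 / 1024)
    sh._dataFormat( 5 * 1024 * 1024 )           // "5Mb"
    sh._dataFormat( 3 * 1024 * 1024 * 1024 )    // "3Gb"

    sh._collRE( "foo.bar" )                     // /^foo\.bar-.*/, matches the chunk _ids of that namespace
    sh._pchunk( { min : { x : 0 }, max : { x : 100 } } )   // "[{ "x" : 0 } -> { "x" : 100 }]"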
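
The servers.js changes are test-harness conveniences: each shard connection is now also exposed as shardN on the ShardingTest object, and shardGo accepts false for its split and move arguments to skip the initial split and/or migration. A minimal jstest-style sketch under those assumptions (database and collection names are made up):

    var st = new ShardingTest( "shard_handles", 2, 0, 1 )

    // shard test.foo on { _id : 1 } but leave it as a single chunk:
    // passing false skips both the initial split and the chunk migration
    st.shardGo( st.s.getDB( "test" ).foo, { _id : 1 }, false, false )

    // direct per-shard handles added by this patch (previously only st._connections[i])
    print( st.shard0.getDB( "test" ).foo.count() )
    print( st.shard1.getDB( "test" ).foo.count() )

    st.stop()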
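
Finally, testLog is renamed to jsTestLog in both utils.js and its generated copy in mongo_vstudio.cpp, and the output changes from a jsTestFile() prefix to a framed banner:

    jsTestLog( "starting failover test" )
    // prints, surrounded by blank lines:
    // ----
    // starting failover test
    // ----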