_parsePath = function() { var dbpath = ""; for( var i = 0; i < arguments.length; ++i ) if ( arguments[ i ] == "--dbpath" ) dbpath = arguments[ i + 1 ]; if ( dbpath == "" ) throw "No dbpath specified"; return dbpath; } _parsePort = function() { var port = ""; for( var i = 0; i < arguments.length; ++i ) if ( arguments[ i ] == "--port" ) port = arguments[ i + 1 ]; if ( port == "" ) throw "No port specified"; return port; } connectionURLTheSame = function( a , b ){ if ( a == b ) return true; if ( ! a || ! b ) return false; a = a.split( "/" )[0] b = b.split( "/" )[0] return a == b; } assert( connectionURLTheSame( "foo" , "foo" ) ) assert( ! connectionURLTheSame( "foo" , "bar" ) ) assert( connectionURLTheSame( "foo/a,b" , "foo/b,a" ) ) assert( ! connectionURLTheSame( "foo/a,b" , "bar/a,b" ) ) createMongoArgs = function( binaryName , args ){ var fullArgs = [ binaryName ]; if ( args.length == 1 && isObject( args[0] ) ){ var o = args[0]; for ( var k in o ){ if ( o.hasOwnProperty(k) ){ if ( k == "v" && isNumber( o[k] ) ){ var n = o[k]; if ( n > 0 ){ if ( n > 10 ) n = 10; var temp = "-"; while ( n-- > 0 ) temp += "v"; fullArgs.push( temp ); } } else { fullArgs.push( "--" + k ); if ( o[k] != "" ) fullArgs.push( "" + o[k] ); } } } } else { for ( var i=0; i 0 ) rsName = name.substring( 0 , name.indexOf( "/" ) ); for ( var i=0; i " + tojsononeline( r.max ); } ShardingTest.prototype.printChangeLog = function(){ var s = this; this.config.changelog.find().forEach( function(z){ var msg = z.server + "\t" + z.time + "\t" + z.what; for ( i=z.what.length; i<15; i++ ) msg += " "; msg += " " + z.ns + "\t"; if ( z.what == "split" ){ msg += s._rangeToString( z.details.before ) + " -->> (" + s._rangeToString( z.details.left ) + "),(" + s._rangeToString( z.details.right ) + ")"; } else if (z.what == "multi-split" ){ msg += s._rangeToString( z.details.before ) + " -->> (" + z.details.number + "/" + z.details.of + " " + s._rangeToString( z.details.chunk ) + ")"; } else { msg += tojsononeline( z.details ); } print( "ShardingTest " + msg ) } ); } ShardingTest.prototype.getChunksString = function( ns ){ var q = {} if ( ns ) q.ns = ns; var s = ""; this.config.chunks.find( q ).sort( { ns : 1 , min : 1 } ).forEach( function(z){ s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) + " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n"; } ); return s; } ShardingTest.prototype.printChunks = function( ns ){ print( "ShardingTest " + this.getChunksString( ns ) ); } ShardingTest.prototype.printShardingStatus = function(){ printShardingStatus( this.config ); } ShardingTest.prototype.printCollectionInfo = function( ns , msg ){ var out = ""; if ( msg ) out += msg + "\n"; out += "sharding collection info: " + ns + "\n"; for ( var i=0; i> " + tojson( chunk.max ) + " on : " + chunk.shard + " " + tojson( chunk.lastmod ) ); } ); } else { output( "\t\t\ttoo many chunks to print, use verbose if you want to force print" ); } } } ) } } ); print( raw ); } printShardingSizes = function(){ configDB = db.getSisterDB('config') var version = configDB.getCollection( "version" ).findOne(); if ( version == null ){ print( "printShardingSizes : not a shard db!" ); return; } var raw = ""; var output = function(s){ raw += s + "\n"; } output( "--- Sharding Status --- " ); output( " sharding version: " + tojson( configDB.getCollection( "version" ).findOne() ) ); output( " shards:" ); var shards = {}; configDB.shards.find().forEach( function(z){ shards[z._id] = new Mongo(z.host); output( " " + tojson(z) ); } ); var saveDB = db; output( " databases:" ); configDB.databases.find().sort( { name : 1 } ).forEach( function(db){ output( "\t" + tojson(db,"",true) ); if (db.partitioned){ configDB.collections.find( { _id : new RegExp( "^" + db._id + "\." ) } ).sort( { _id : 1 } ).forEach( function( coll ){ output("\t\t" + coll._id + " chunks:"); configDB.chunks.find( { "ns" : coll._id } ).sort( { min : 1 } ).forEach( function(chunk){ var mydb = shards[chunk.shard].getDB(db._id) var out = mydb.runCommand({dataSize: coll._id, keyPattern: coll.key, min: chunk.min, max: chunk.max }); delete out.millis; delete out.ok; output( "\t\t\t" + tojson( chunk.min ) + " -->> " + tojson( chunk.max ) + " on : " + chunk.shard + " " + tojson( out ) ); } ); } ) } } ); print( raw ); } ShardingTest.prototype.sync = function(){ this.adminCommand( "connpoolsync" ); } ShardingTest.prototype.onNumShards = function( collName , dbName ){ this.sync(); // we should sync since we're going directly to mongod here dbName = dbName || "test"; var num=0; for ( var i=0; i 0 ) num++; return num; } ShardingTest.prototype.shardCounts = function( collName , dbName ){ this.sync(); // we should sync since we're going directly to mongod here dbName = dbName || "test"; var counts = {} for ( var i=0; i max ) max = c[s]; } print( "ShardingTest input: " + tojson( c ) + " min: " + min + " max: " + max ); return max - min; } ShardingTest.prototype.getShard = function( coll, query ){ var shards = this.getShards( coll, query ) assert.eq( shards.length, 1 ) return shards[0] } // Returns the shards on which documents matching a particular query reside ShardingTest.prototype.getShards = function( coll, query ){ if( ! coll.getDB ) coll = this.s.getCollection( coll ) var explain = coll.find( query ).explain() var shards = [] if( explain.shards ){ for( var shardName in explain.shards ){ for( var i = 0; i < explain.shards[shardName].length; i++ ){ if( explain.shards[shardName][i].n && explain.shards[shardName][i].n > 0 ) shards.push( shardName ) } } } for( var i = 0; i < shards.length; i++ ){ for( var j = 0; j < this._connections.length; j++ ){ if ( connectionURLTheSame( this._connections[j].name , shards[i] ) ){ shards[i] = this._connections[j] break; } } } return shards } ShardingTest.prototype.isSharded = function( collName ){ var collName = "" + collName var dbName = undefined if( typeof collName.getCollectionNames == 'function' ){ dbName = "" + collName collName = undefined } if( dbName ){ var x = this.config.databases.findOne( { _id : dbname } ) if( x ) return x.partitioned else return false } if( collName ){ var x = this.config.collections.findOne( { _id : collName } ) if( x ) return true else return false } } ShardingTest.prototype.shardGo = function( collName , key , split , move , dbName ){ split = ( split != false ? ( split || key ) : split ) move = ( split != false && move != false ? ( move || split ) : false ) if( collName.getDB ) dbName = "" + collName.getDB() else dbName = dbName || "test"; var c = dbName + "." + collName; if( collName.getDB ) c = "" + collName var isEmpty = this.s.getCollection( c ).count() == 0 if( ! this.isSharded( dbName ) ) this.s.adminCommand( { enableSharding : dbName } ) var result = this.s.adminCommand( { shardcollection : c , key : key } ) if( ! result.ok ){ printjson( result ) assert( false ) } if( split == false ) return result = this.s.adminCommand( { split : c , middle : split } ); if( ! result.ok ){ printjson( result ) assert( false ) } if( move == false ) return var result = null for( var i = 0; i < 5; i++ ){ result = this.s.adminCommand( { movechunk : c , find : move , to : this.getOther( this.getServer( dbName ) ).name } ); if( result.ok ) break; sleep( 5 * 1000 ); } printjson( result ) assert( result.ok ) }; ShardingTest.prototype.shardColl = ShardingTest.prototype.shardGo ShardingTest.prototype.setBalancer = function( balancer ){ if( balancer || balancer == undefined ){ this.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true ) } else if( balancer == false ){ this.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true ) } } /** * Run a mongod process. * * After initializing a MongodRunner, you must call start() on it. * @param {int} port port to run db on, use allocatePorts(num) to requision * @param {string} dbpath path to use * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host) * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host) * @param {array} extraArgs other arguments for the command line * @param {object} options other options include no_bind to not bind_ip to 127.0.0.1 * (necessary for replica set testing) */ MongodRunner = function( port, dbpath, peer, arbiter, extraArgs, options ) { this.port_ = port; this.dbpath_ = dbpath; this.peer_ = peer; this.arbiter_ = arbiter; this.extraArgs_ = extraArgs; this.options_ = options ? options : {}; }; /** * Start this mongod process. * * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it) */ MongodRunner.prototype.start = function( reuseData ) { var args = []; if ( reuseData ) { args.push( "mongod" ); } args.push( "--port" ); args.push( this.port_ ); args.push( "--dbpath" ); args.push( this.dbpath_ ); args.push( "--nohttpinterface" ); args.push( "--noprealloc" ); args.push( "--smallfiles" ); if (!this.options_.no_bind) { args.push( "--bind_ip" ); args.push( "127.0.0.1" ); } if ( this.extraArgs_ ) { args = args.concat( this.extraArgs_ ); } removeFile( this.dbpath_ + "/mongod.lock" ); if ( reuseData ) { return startMongoProgram.apply( null, args ); } else { return startMongod.apply( null, args ); } } MongodRunner.prototype.port = function() { return this.port_; } MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); } ToolTest = function( name ){ this.name = name; this.port = allocatePorts(1)[0]; this.baseName = "jstests_tool_" + name; this.root = "/data/db/" + this.baseName; this.dbpath = this.root + "/"; this.ext = this.root + "_external/"; this.extFile = this.root + "_external/a"; resetDbpath( this.dbpath ); } ToolTest.prototype.startDB = function( coll ){ assert( ! this.m , "db already running" ); this.m = startMongoProgram( "mongod" , "--port", this.port , "--dbpath" , this.dbpath , "--nohttpinterface", "--noprealloc" , "--smallfiles" , "--bind_ip", "127.0.0.1" ); this.db = this.m.getDB( this.baseName ); if ( coll ) return this.db.getCollection( coll ); return this.db; } ToolTest.prototype.stop = function(){ if ( ! this.m ) return; stopMongod( this.port ); this.m = null; this.db = null; print('*** ' + this.name + " completed successfully ***"); } ToolTest.prototype.runTool = function(){ var a = [ "mongo" + arguments[0] ]; var hasdbpath = false; for ( var i=1; i timeout) { throw('[' + opts['desc'] + ']' + " timed out after " + timeout + "ms ( " + tries + " tries )"); } } return result; } ReplSetTest.prototype.initiate = function( cfg , initCmd , timeout ) { var master = this.nodes[0].getDB("admin"); var config = cfg || this.getReplSetConfig(); var cmd = {}; var cmdKey = initCmd || 'replSetInitiate'; var timeout = timeout || 30000; cmd[cmdKey] = config; printjson(cmd); this.attempt({timeout: timeout, desc: "Initiate replica set"}, function() { var result = master.runCommand(cmd); printjson(result); return result['ok'] == 1; }); } ReplSetTest.prototype.reInitiate = function() { var master = this.nodes[0]; var c = master.getDB("local")['system.replset'].findOne(); var config = this.getReplSetConfig(); config.version = c.version + 1; this.initiate( config , 'replSetReconfig' ); } ReplSetTest.prototype.getLastOpTimeWritten = function() { this.getMaster(); this.attempt({context : this, desc : "awaiting oplog query"}, function() { try { this.latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts']; } catch(e) { print("ReplSetTest caught exception " + e); return false; } return true; }); }; ReplSetTest.prototype.awaitReplication = function(timeout) { timeout = timeout || 30000; this.getLastOpTimeWritten(); print("ReplSetTest " + this.latest); this.attempt({context: this, timeout: timeout, desc: "awaiting replication"}, function() { try { var synced = true; for(var i=0; i lastTime ){ if( lastTime == null ) print( "ReplSetTest waitForIndicator Initial status ( timeout : " + timeout + " ) :" ) printjson( status ) lastTime = new Date().getTime() } if (typeof status.members == 'undefined') { return false; } for( var i = 0; i < status.members.length; i++ ){ if( status.members[i].name == node.host ){ for( var j = 0; j < states.length; j++ ){ if( status.members[i][ind] == states[j] ) return true; } } } return false }); print( "ReplSetTest waitForIndicator final status:" ) printjson( status ) } ReplSetTest.Health = {} ReplSetTest.Health.UP = 1 ReplSetTest.Health.DOWN = 0 ReplSetTest.State = {} ReplSetTest.State.PRIMARY = 1 ReplSetTest.State.SECONDARY = 2 ReplSetTest.State.RECOVERING = 3 /** * Overflows a replica set secondary or secondaries, specified by id or conn. */ ReplSetTest.prototype.overflow = function( secondaries ){ // Create a new collection to overflow, allow secondaries to replicate var master = this.getMaster() var overflowColl = master.getCollection( "_overflow.coll" ) overflowColl.insert({ replicated : "value" }) this.awaitReplication() this.stop( secondaries, undefined, 5 * 60 * 1000 ) var count = master.getDB("local").oplog.rs.count(); var prevCount = -1; // Keep inserting till we hit our capped coll limits while (count != prevCount) { print("ReplSetTest overflow inserting 10000"); for (var i = 0; i < 10000; i++) { overflowColl.insert({ overflow : "value" }); } prevCount = count; this.awaitReplication(); count = master.getDB("local").oplog.rs.count(); print( "ReplSetTest overflow count : " + count + " prev : " + prevCount ); } // Restart all our secondaries and wait for recovery state this.start( secondaries, { remember : true }, true, true ) this.waitForState( secondaries, this.RECOVERING, 5 * 60 * 1000 ) } /** * Bridging allows you to test network partitioning. For example, you can set * up a replica set, run bridge(), then kill the connection between any two * nodes x and y with partition(x, y). * * Once you have called bridging, you cannot reconfigure the replica set. */ ReplSetTest.prototype.bridge = function() { if (this.bridges) { print("ReplSetTest bridge bridges have already been created!"); return; } var n = this.nodes.length; // create bridges this.bridges = []; for (var i=0; i1, 0->2, 1->0, 1->2, 2->0, 2->1. We can kill * the connection between nodes 0 and 2 by calling replTest.partition(0,2) or * replTest.partition(2,0) (either way is identical). Then the replica set would * have the following bridges: 0->1, 1->0, 1->2, 2->1. */ ReplSetTest.prototype.partition = function(from, to) { this.bridges[from][to].stop(); this.bridges[to][from].stop(); }; /** * This reverses a partition created by partition() above. */ ReplSetTest.prototype.unPartition = function(from, to) { this.bridges[from][to].start(); this.bridges[to][from].start(); }; ReplSetBridge = function(rst, from, to) { var n = rst.nodes.length; var startPort = rst.startPort+n; this.port = (startPort+(from*n+to)); this.host = rst.host+":"+this.port; this.dest = rst.host+":"+rst.ports[to]; this.start(); }; ReplSetBridge.prototype.start = function() { var args = ["mongobridge", "--port", this.port, "--dest", this.dest]; print("ReplSetBridge starting: "+tojson(args)); this.bridge = startMongoProgram.apply( null , args ); print("ReplSetBridge started " + this.bridge); }; ReplSetBridge.prototype.stop = function() { print("ReplSetBridge stopping: " + this.port); stopMongod(this.port); }; ReplSetBridge.prototype.toString = function() { return this.host+" -> "+this.dest; };