author    Antonin Kral <a.kral@bobek.cz>    2011-09-14 17:08:06 +0200
committer Antonin Kral <a.kral@bobek.cz>    2011-09-14 17:08:06 +0200
commit    5d342a758c6095b4d30aba0750b54f13b8916f51 (patch)
tree      762e9aa84781f5e3b96db2c02d356c29cf0217c0 /jstests/sharding
parent    cbe2d992e9cd1ea66af9fa91df006106775d3073 (diff)
download  mongodb-5d342a758c6095b4d30aba0750b54f13b8916f51.tar.gz

Imported Upstream version 2.0.0
Diffstat (limited to 'jstests/sharding')
 -rw-r--r--  jstests/sharding/addshard1.js                        2
 -rw-r--r--  jstests/sharding/addshard4.js                       26
 -rw-r--r--  jstests/sharding/array_shard_key.js                127
 -rw-r--r--  jstests/sharding/auth.js                           177
 -rw-r--r--  jstests/sharding/count_slaveok.js                   69
 -rw-r--r--  jstests/sharding/drop_sharded_db.js                 62
 -rw-r--r--  jstests/sharding/features2.js                       11
 -rw-r--r--  jstests/sharding/features3.js                       61
 -rw-r--r--  jstests/sharding/group_slaveok.js                   68
 -rw-r--r--  jstests/sharding/index1.js                         174
 -rw-r--r--  jstests/sharding/migrateBig.js                       2
 -rw-r--r--  jstests/sharding/migrateMemory.js                   54
 -rw-r--r--  jstests/sharding/multi_mongos1.js                    3
 -rw-r--r--  jstests/sharding/multi_mongos2.js                   61
 -rw-r--r--  jstests/sharding/parallel.js                        38
 -rw-r--r--  jstests/sharding/shard3.js                          12
 -rw-r--r--  jstests/sharding/shard6.js                           3
 -rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js     3
 -rw-r--r--  jstests/sharding/shard_keycount.js                  45
 -rw-r--r--  jstests/sharding/sharding_with_keyfile.js           69
 -rwxr-xr-x  jstests/sharding/sharding_with_keyfile.key           3
 -rw-r--r--  jstests/sharding/sync6.js                           81
 -rw-r--r--  jstests/sharding/sync7.js                           63
 23 files changed, 1188 insertions, 26 deletions
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index f28feed..0ca6a83 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -44,7 +44,7 @@ assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary
assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving datbase that existed before addshard" );
// make sure we can shard the original collections
-sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+sdb1.foo.ensureIndex( { a : 1 }, { unique : true } ) // can't shard populated collection without an index
s.adminCommand( { enablesharding : "testDB" } );
s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index be4a8b3..4a44b55 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -2,15 +2,18 @@
s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
-r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 34000});
+r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 31100});
r.startSet();
var config = r.getReplSetConfig();
config.members[2].priority = 0;
r.initiate(config);
+//Wait for replica set to be fully initialized - could take some time
+//to pre-allocate files on slow systems
+r.awaitReplication();
-var master = r.getMaster().master;
+var master = r.getMaster();
var members = config.members.map(function(elem) { return elem.host; });
var shardName = "addshard4/"+members.join(",");
@@ -20,5 +23,24 @@ print("adding shard "+shardName);
var result = s.adminCommand({"addshard" : shardName});
printjson(result);
+assert.eq(result, true);
+r = new ReplSetTest({name : "addshard42", nodes : 3, startPort : 31200});
+r.startSet();
+
+config = r.getReplSetConfig();
+config.members[2].arbiterOnly = true;
+
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
+master = r.getMaster();
+
+print("adding shard addshard42");
+
+result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
+
+printjson(result);
+assert.eq(result, true);
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
new file mode 100644
index 0000000..1ea61e8
--- /dev/null
+++ b/jstests/sharding/array_shard_key.js
@@ -0,0 +1,127 @@
+// Ensure you can't shard on an array key
+
+var st = new ShardingTest({ name : jsTestName(), shards : 3 })
+
+var mongos = st.s0
+
+var coll = mongos.getCollection( jsTestName() + ".foo" )
+
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+
+printjson( mongos.getDB("config").chunks.find().toArray() )
+
+st.printShardingStatus()
+
+print( "1: insert some invalid data" )
+
+var value = null
+
+var checkError = function( shouldError ){
+ var error = coll.getDB().getLastError()
+
+ if( error != null ) printjson( error )
+
+ if( error == null && ! shouldError ) return
+ if( error != null && shouldError ) return
+
+ if( error == null ) print( "No error detected!" )
+ else print( "Unexpected error!" )
+
+ assert( false )
+}
+
+// Insert an object with invalid array key
+coll.insert({ i : [ 1, 2 ] })
+checkError( true )
+
+// Insert an object with valid array key
+coll.insert({ i : 1 })
+checkError( false )
+
+// Update the value with valid other field
+value = coll.findOne({ i : 1 })
+coll.update( value, { $set : { j : 2 } } )
+checkError( false )
+
+// Update the value with invalid other fields
+value = coll.findOne({ i : 1 })
+coll.update( value, Object.merge( value, { i : [ 3 ] } ) )
+checkError( true )
+
+// Multi-update the value with invalid other fields
+value = coll.findOne({ i : 1 })
+coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true)
+checkError( true )
+
+// Single update the value with valid other fields
+value = coll.findOne({ i : 1 })
+coll.update( Object.merge( value, { i : [ 3, 4 ] } ), value )
+checkError( true )
+
+// Multi-update the value with other fields (won't work, but no error)
+value = coll.findOne({ i : 1 })
+coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true)
+checkError( false )
+
+// Query the value with other fields (won't work, but no error)
+value = coll.findOne({ i : 1 })
+coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray()
+checkError( false )
+
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) )
+checkError( false )
+
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
+assert.eq( coll.find().itcount(), 1 )
+
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : 1 } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
+assert.eq( coll.find().itcount(), 0 )
+
+printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
+
+// Insert a bunch of data then shard over key which is an array
+var coll = mongos.getCollection( "" + coll + "2" )
+for( var i = 0; i < 10; i++ ){
+ // TODO : does not check weird cases like [ i, i ]
+ coll.insert({ i : [ i, i + 1 ] })
+ checkError( false )
+}
+
+coll.ensureIndex({ _id : 1, i : 1 })
+
+try {
+ st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+}
+catch( e ){
+ print( "Correctly threw error on sharding with multikey index." )
+}
+
+st.printShardingStatus()
+
+// Insert a bunch of data then shard over key which is not an array
+var coll = mongos.getCollection( "" + coll + "3" )
+for( var i = 0; i < 10; i++ ){
+ // TODO : does not check weird cases like [ i, i ]
+ coll.insert({ i : i })
+ checkError( false )
+}
+
+coll.ensureIndex({ _id : 1, i : 1 })
+
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+
+st.printShardingStatus()
+
+
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
new file mode 100644
index 0000000..8d8d7d7
--- /dev/null
+++ b/jstests/sharding/auth.js
@@ -0,0 +1,177 @@
+
+adminUser = {
+ db : "admin",
+ username : "foo",
+ password : "bar"
+};
+
+testUser = {
+ db : "test",
+ username : "bar",
+ password : "baz"
+};
+
+function login(userObj) {
+ var n = s.getDB(userObj.db).runCommand({getnonce: 1});
+ var a = s.getDB(userObj.db).runCommand({authenticate: 1, user: userObj.username, nonce: n.nonce, key: s.getDB("admin").__pwHash(n.nonce, userObj.username, userObj.password)});
+ printjson(a);
+}
+
+function logout(userObj) {
+ s.getDB(userObj.db).runCommand({logout:1});
+}
+
+function getShardName(rsTest) {
+ var master = rsTest.getMaster();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) { return elem.host; });
+ return config._id+"/"+members.join(",");
+}
+
+var s = new ShardingTest( "auth1", 0 , 0 , 1 , {rs: true, extraOptions : {"keyFile" : "jstests/libs/key1"}, noChunkSize : true});
+
+print("logging in first, if there was an unclean shutdown the user might already exist");
+login(adminUser);
+
+var user = s.getDB("admin").system.users.findOne();
+if (user) {
+ print("user already exists");
+ printjson(user);
+}
+else {
+ print("adding user");
+ s.getDB(adminUser.db).addUser(adminUser.username, adminUser.password);
+}
+
+login(adminUser);
+s.getDB( "config" ).settings.update( { _id : "chunksize" }, {$set : {value : 1 }}, true );
+printjson(s.getDB("config").runCommand({getlasterror:1}));
+printjson(s.getDB("config").settings.find().toArray());
+
+print("restart mongos");
+stopMongoProgram(31000);
+var opts = { port : 31000, v : 0, configdb : s._configDB, keyFile : "jstests/libs/key1", chunkSize : 1 };
+var conn = startMongos( opts );
+s.s = s._mongos[0] = s["s0"] = conn;
+
+login(adminUser);
+
+d1 = new ReplSetTest({name : "d1", nodes : 3, startPort : 31100});
+d1.startSet({keyFile : "jstests/libs/key2"});
+d1.initiate();
+
+print("initiated");
+var shardName = getShardName(d1);
+
+print("adding shard w/out auth "+shardName);
+logout(adminUser);
+
+var result = s.getDB("admin").runCommand({addShard : shardName});
+printjson(result);
+assert.eq(result.errmsg, "unauthorized");
+
+login(adminUser);
+
+print("adding shard w/wrong key "+shardName);
+
+var thrown = false;
+try {
+ result = s.adminCommand({addShard : shardName});
+}
+catch(e) {
+ thrown = true;
+ printjson(e);
+}
+assert(thrown);
+
+print("start rs w/correct key");
+d1.stopSet();
+d1.startSet({keyFile : "jstests/libs/key1"});
+d1.initiate();
+var master = d1.getMaster();
+
+print("adding shard w/auth "+shardName);
+
+result = s.getDB("admin").runCommand({addShard : shardName});
+assert.eq(result.ok, 1, tojson(result));
+
+s.getDB("admin").runCommand({enableSharding : "test"});
+s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
+
+s.getDB(testUser.db).addUser(testUser.username, testUser.password);
+
+logout(adminUser);
+
+print("query try");
+var e = assert.throws(function() {
+ conn.getDB("foo").bar.findOne();
+});
+printjson(e);
+
+print("cmd try");
+e = assert.throws(function() {
+ conn.getDB("foo").runCommand({listdbs:1});
+});
+printjson(e);
+
+print("insert try 1");
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").runCommand({getLastError : 1});
+assert.eq(result.err, "unauthorized");
+
+logout(adminUser);
+
+login(testUser);
+
+print("insert try 2");
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").runCommand({getLastError : 1});
+assert.eq(result.err, null);
+
+logout(testUser);
+
+d2 = new ReplSetTest({name : "d2", nodes : 3, startPort : 31200});
+d2.startSet({keyFile : "jstests/libs/key1"});
+d2.initiate();
+
+shardName = getShardName(d2);
+
+print("adding shard "+shardName);
+login(adminUser);
+print("logged in");
+result = s.getDB("admin").runCommand({addShard : shardName})
+
+var num = 100000;
+for (i=0; i<num; i++) {
+ s.getDB("test").foo.insert({x:i, abc : "defg", date : new Date(), str : "all the talk on the market"});
+}
+
+var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
+var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
+var totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
+
+print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
+
+assert(d1Chunks > 0 && d2Chunks > 0 && d1Chunks+d2Chunks == totalChunks);
+
+assert.eq(s.getDB("test").foo.count(), num+1);
+
+s.s.setSlaveOk();
+
+var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
+
+var count = 0;
+while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+}
+
+assert.eq(count, 501);
+
+// check that dump doesn't get stuck with auth
+var x = runMongoProgram( "mongodump", "--host", "127.0.0.1:31000", "-d", testUser.db, "-u", testUser.username, "-p", testUser.password);
+
+print("result: "+x);
+
+
+s.stop();
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
new file mode 100644
index 0000000..075ab41
--- /dev/null
+++ b/jstests/sharding/count_slaveok.js
@@ -0,0 +1,69 @@
+// Tests count and distinct using slaveOk
+
+var st = new ShardingTest( testName = "countSlaveOk",
+ numShards = 1,
+ verboseLevel = 0,
+ numMongos = 1,
+ { rs : true,
+ rs0 : { nodes : 2 }
+ })
+
+var rst = st._rs[0].test
+
+// Insert data into replica set
+var conn = new Mongo( st.s.host )
+conn.setLogLevel( 3 )
+
+var coll = conn.getCollection( "test.countSlaveOk" )
+coll.drop()
+
+for( var i = 0; i < 300; i++ ){
+ coll.insert( { i : i % 10 } )
+}
+
+var connA = conn
+var connB = new Mongo( st.s.host )
+var connC = new Mongo( st.s.host )
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
+st.printShardingStatus()
+
+// Wait for client to update itself and replication to finish
+rst.awaitReplication()
+
+var primary = rst.getPrimary()
+var sec = rst.getSecondary()
+
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop( rst.getMaster(), undefined, true )
+printjson( rst.status() )
+
+// Wait for the mongos to recognize the slave
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk()
+
+// Should throw exception, since not slaveOk'd
+assert.eq( 30, coll.find({ i : 0 }).count() )
+assert.eq( 10, coll.distinct("i").length )
+
+try {
+
+ conn.setSlaveOk( false )
+ coll.find({ i : 0 }).count()
+
+ print( "Should not reach here!" )
+ printjson( coll.getDB().getLastError() )
+ assert( false )
+
+}
+catch( e ){
+ print( "Non-slaveOk'd connection failed." )
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
new file mode 100644
index 0000000..aedde8f
--- /dev/null
+++ b/jstests/sharding/drop_sharded_db.js
@@ -0,0 +1,62 @@
+// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
+
+var st = new ShardingTest({ name : jsTestName() })
+
+var mongos = st.s0
+var config = mongos.getDB( "config" )
+
+var dbName = "buy"
+var dbA = mongos.getDB( dbName )
+var dbB = mongos.getDB( dbName + "_201107" )
+var dbC = mongos.getDB( dbName + "_201108" )
+
+print( "1: insert some data and colls into all dbs" )
+
+var numDocs = 3000;
+var numColls = 10;
+for( var i = 0; i < numDocs; i++ ){
+ dbA.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+ dbB.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+ dbC.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+}
+
+print( "2: shard the colls ")
+
+for( var i = 0; i < numColls; i++ ){
+
+ var key = { _id : 1 }
+ st.shardColl( dbA.getCollection( "data" + i ), key )
+ st.shardColl( dbB.getCollection( "data" + i ), key )
+ st.shardColl( dbC.getCollection( "data" + i ), key )
+
+}
+
+print( "3: drop the non-suffixed db ")
+
+dbA.dropDatabase()
+
+
+print( "3: ensure only the non-suffixed db was dropped ")
+
+var dbs = mongos.getDBNames()
+for( var i = 0; i < dbs.length; i++ ){
+ assert.neq( dbs, "" + dbA )
+}
+
+assert.eq( 0, config.databases.find({ _id : "" + dbA }).toArray().length )
+assert.eq( 1, config.databases.find({ _id : "" + dbB }).toArray().length )
+assert.eq( 1, config.databases.find({ _id : "" + dbC }).toArray().length )
+
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbA + "\\..*" ), dropped : true }).toArray().length )
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbB + "\\..*" ), dropped : false }).toArray().length )
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbC + "\\..*" ), dropped : false }).toArray().length )
+
+for( var i = 0; i < numColls; i++ ){
+
+ assert.eq( numDocs / numColls, dbB.getCollection( "data" + (i % numColls) ).find().itcount() )
+ assert.eq( numDocs / numColls, dbC.getCollection( "data" + (i % numColls) ).find().itcount() )
+
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index b2070ea..67a9abe 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -97,7 +97,7 @@ doMR = function( n ){
var res = db.mr.mapReduce( m , r , "smr1_out" );
printjson( res );
- assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n );
+ assert.eq( 4 , res.counts.input , "MR T0 " + n );
var x = db[res.result];
assert.eq( 3 , x.find().count() , "MR T1 " + n );
@@ -115,7 +115,7 @@ doMR = function( n ){
var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
printjson( res );
- assert.eq( new NumberLong(4) , res.counts.input , "MR T6 " + n );
+ assert.eq( 4 , res.counts.input , "MR T6 " + n );
var z = {};
res.find().forEach( function(a){ z[a._id] = a.value.count; } );
@@ -173,4 +173,11 @@ catch ( e ){
assert.eq( x , y , "assert format" )
+// isMaster and query-wrapped-command
+isMaster = db.runCommand({isMaster:1});
+assert( isMaster.ismaster );
+assert.eq( 'isdbgrid', isMaster.msg );
+assert.eq( isMaster, db.runCommand({query: {isMaster:1}}) );
+assert.eq( isMaster, db.runCommand({$query: {isMaster:1}}) );
+
s.stop();
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index b28d88e..5277d22 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -17,52 +17,79 @@ for ( i=0; i<N; i++ )
db.foo.insert( { _id : i } )
db.getLastError();
x = db.foo.stats();
+assert.eq( "test.foo" , x.ns , "basic1" )
+assert( x.sharded , "basic2" )
assert.eq( N , x.count , "total count" )
assert.eq( N / 2 , x.shards.shard0000.count , "count on shard0000" )
assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
+assert( x.totalIndexSize > 0 )
+assert( x.numExtents > 0 )
+db.bar.insert( { x : 1 } )
+x = db.bar.stats();
+assert.eq( 1 , x.count , "XXX1" )
+assert.eq( "test.bar" , x.ns , "XXX2" )
+assert( ! x.sharded , "XXX3: " + tojson(x) )
+
+// Fork shell and start pulling back data
start = new Date()
print( "about to fork shell: " + Date() )
-join = startParallelShell( "db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
+
+// TODO: Still potential problem when our sampling of current ops misses when $where is active -
+// solution is to increase sleep time
+parallelCommand = "try { while(true){" +
+ " db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } sleep( 1000 ); return true; } ).itcount() " +
+ "}} catch(e){ print('PShell execution ended:'); printjson( e ) }"
+
+join = startParallelShell( parallelCommand )
print( "after forking shell: " + Date() )
+// Get all current $where operations
function getMine( printInprog ){
+
var inprog = db.currentOp().inprog;
+
if ( printInprog )
printjson( inprog )
+
+ // Find all the where queries
var mine = []
for ( var x=0; x<inprog.length; x++ ){
if ( inprog[x].query && inprog[x].query.$where ){
mine.push( inprog[x] )
}
}
+
return mine;
}
-state = 0; // 0 = not found, 1 = killed,
-killTime = null;
+var state = 0; // 0 = not found, 1 = killed,
+var killTime = null;
+var i = 0;
-for ( i=0; i<( 100* 1000 ); i++ ){
+assert.soon( function(){
+
+ // Get all the current operations
mine = getMine( state == 0 && i > 20 );
- if ( state == 0 ){
- if ( mine.length == 0 ){
- sleep(1);
- continue;
- }
+ i++;
+
+ // Wait for the queries to start
+ if ( state == 0 && mine.length > 0 ){
+ // Queries started
state = 1;
+ // Kill all $where
mine.forEach( function(z){ printjson( db.getSisterDB( "admin" ).killOp( z.opid ) ); } )
killTime = new Date()
}
- else if ( state == 1 ){
- if ( mine.length == 0 ){
- state = 2;
- break;
- }
- sleep(1)
- continue;
+ // Wait for killed queries to end
+ else if ( state == 1 && mine.length == 0 ){
+ // Queries ended
+ state = 2;
+ return true;
}
-}
+
+}, "Couldn't kill the $where operations.", 2 * 60 * 1000 )
print( "after loop: " + Date() );
assert( killTime , "timed out waiting too kill last mine:" + tojson(mine) )
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
new file mode 100644
index 0000000..3b7cec4
--- /dev/null
+++ b/jstests/sharding/group_slaveok.js
@@ -0,0 +1,68 @@
+// Tests group using slaveOk
+
+var st = new ShardingTest( testName = "groupSlaveOk",
+ numShards = 1,
+ verboseLevel = 0,
+ numMongos = 1,
+ { rs : true,
+ rs0 : { nodes : 2 }
+ })
+
+var rst = st._rs[0].test
+
+// Insert data into replica set
+var conn = new Mongo( st.s.host )
+conn.setLogLevel( 3 )
+
+var coll = conn.getCollection( "test.groupSlaveOk" )
+coll.drop()
+
+for( var i = 0; i < 300; i++ ){
+ coll.insert( { i : i % 10 } )
+}
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
+st.printShardingStatus()
+
+// Wait for client to update itself and replication to finish
+rst.awaitReplication()
+
+var primary = rst.getPrimary()
+var sec = rst.getSecondary()
+
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop( rst.getMaster(), undefined, true )
+printjson( rst.status() )
+
+// Wait for the mongos to recognize the slave
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk()
+
+// Should not throw exception, since slaveOk'd
+assert.eq( 10, coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } }).length )
+
+try {
+
+ conn.setSlaveOk( false )
+ coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } })
+
+ print( "Should not reach here!" )
+ printjson( coll.getDB().getLastError() )
+ assert( false )
+
+}
+catch( e ){
+ print( "Non-slaveOk'd connection failed." )
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
new file mode 100644
index 0000000..6f99449
--- /dev/null
+++ b/jstests/sharding/index1.js
@@ -0,0 +1,174 @@
+// from server 2326 - make sure that sharding only works with unique indices
+
+s = new ShardingTest( "shard_index", 2, 50, 1 )
+
+// Regenerate fully because of SERVER-2782
+for ( var i = 0; i < 10; i++ ) {
+
+ var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i )
+ coll.drop()
+
+ for ( var j = 0; j < 300; j++ ) {
+ coll.insert( { num : j, x : 1 } )
+ }
+
+ if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } );
+
+ print("\n\n\n\n\nTest # " + i)
+
+ if ( i == 0 ) {
+
+ // Unique index exists, but not the right one.
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 } )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!")
+
+ }
+ if ( i == 1 ) {
+
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ }
+ catch(e){
+ print(e)
+ assert( false, "Should be able to shard non-unique index without unique option.")
+ }
+
+ }
+ if ( i == 2 ) {
+ if (false) { // SERVER-3718
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 } )
+
+ passed = false;
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ passed = true;
+
+ }
+ catch( e ){
+ print(e)
+ }
+ assert( !passed, "Should not shard collection with no unique index.")
+ }
+ }
+ if ( i == 3 ) {
+
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex( { num : 1 }, { unique : true })
+ coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique prefix index.")
+ }
+
+ }
+ if ( i == 4 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique id index.")
+ }
+
+ }
+ if ( i == 5 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique combination id index.")
+ }
+
+ }
+ if ( i == 6 ) {
+
+ coll.remove()
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.")
+ }
+
+ }
+ if ( i == 7 ) {
+ coll.remove()
+
+ // No index exists
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } })
+ }
+ catch( e ){
+ print(e)
+ assert( !passed, "Should be able to shard collection with no index on shard key.")
+ }
+ }
+ if ( i == 8 ) {
+ if (false) { // SERVER-3718
+ coll.remove()
+
+ // No index exists
+
+ passed = false
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ passed = true
+ }
+ catch( e ){
+ print(e)
+ }
+ assert( !passed, "Should not shard collection with unique flag but with no unique index on shard key.")
+ }
+ }
+ if ( i == 9 ) {
+
+ // Unique index exists on a different field as well
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 }, { unique : true} )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!" )
+ }
+}
+
+s.stop();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index f6ba18a..917f152 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -40,6 +40,6 @@ for ( i=0; i<20; i+= 2 )
db.printShardingStatus()
-assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 120 * 1000 , 2000 )
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 )
s.stop()
diff --git a/jstests/sharding/migrateMemory.js b/jstests/sharding/migrateMemory.js
new file mode 100644
index 0000000..d321220
--- /dev/null
+++ b/jstests/sharding/migrateMemory.js
@@ -0,0 +1,54 @@
+
+s = new ShardingTest( "migrateMemory" , 2 , 1 , 1 , { chunksize : 1 });
+
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" )
+t = db.foo
+
+str = ""
+while ( str.length < 10000 ){
+ str += "asdasdsdasdasdasdas";
+}
+
+data = 0;
+num = 0;
+while ( data < ( 1024 * 1024 * 10 ) ){
+ t.insert( { _id : num++ , s : str } )
+ data += str.length
+}
+
+db.getLastError()
+
+stats = s.chunkCounts( "foo" )
+from = ""
+to = ""
+for ( x in stats ){
+ if ( stats[x] == 0 )
+ to = x
+ else
+ from = x
+}
+
+s.config.chunks.find().sort( { min : 1 } ).forEach( printjsononeline )
+
+print( "from: " + from + " to: " + to )
+printjson( stats )
+
+ss = []
+
+for ( var f = 0; f<num; f += ( 2 * num / t.stats().nchunks ) ){
+ ss.push( s.getServer( "test" ).getDB( "admin" ).serverStatus() )
+ print( f )
+ s.adminCommand( { movechunk : "test.foo" , find : { _id : f } , to : to } )
+}
+
+for ( i=0; i<ss.length; i++ )
+ printjson( ss[i].mem );
+
+
+s.stop()
+
diff --git a/jstests/sharding/multi_mongos1.js b/jstests/sharding/multi_mongos1.js
index cf9ebde..fc7eaf1 100644
--- a/jstests/sharding/multi_mongos1.js
+++ b/jstests/sharding/multi_mongos1.js
@@ -67,4 +67,5 @@ assert.eq( N , viaS2.find().toArray().length , "other B" );
printjson( primary._db._adminCommand( "shardingState" ) );
-s1.stop();
\ No newline at end of file
+
+s1.stop();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
new file mode 100644
index 0000000..ec95dc0
--- /dev/null
+++ b/jstests/sharding/multi_mongos2.js
@@ -0,0 +1,61 @@
+// multi_mongos2.js
+// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+
+
+// setup sharding with two mongos, s1 and s2
+s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding : "test" } );
+s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s1.config.databases.find().forEach( printjson )
+
+// test queries
+
+s1.getDB('test').existing.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing" , find : { _id : 5 } } )
+
+res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+
+assert.eq(1 , res.ok, tojson(res));
+
+printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) )
+printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) )
+
+assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
+assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+
+// test stats
+
+s1.getDB('test').existing2.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing2.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing2.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing2" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing2" , find : { _id : 5 } } )
+
+var res = s1.getDB('test').existing2.stats()
+printjson( res )
+assert.eq(true, res.sharded); //SERVER-2828
+assert.eq(true, s2.getDB('test').existing2.stats().sharded);
+
+// test admin commands
+
+s1.getDB('test').existing3.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing3" , find : { _id : 5 } } )
+
+res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq(1 , res.ok, tojson(res));
+
+
+
+s1.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
new file mode 100644
index 0000000..d35459c
--- /dev/null
+++ b/jstests/sharding/parallel.js
@@ -0,0 +1,38 @@
+numShards = 3
+s = new ShardingTest( "parallel" , numShards , 2 , 2 , { sync : true } );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" );
+
+N = 10000;
+
+for ( i=0; i<N; i+=(N/12) ) {
+ s.adminCommand( { split : "test.foo" , middle : { _id : i } } )
+ sh.moveChunk( "test.foo", { _id : i } , "shard000" + Math.floor( Math.random() * numShards ) )
+}
+
+
+for ( i=0; i<N; i++ )
+ db.foo.insert( { _id : i } )
+db.getLastError();
+
+
+doCommand = function( dbname , cmd ) {
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ host : db.getMongo().host , parallel : 2 , seconds : 2 } )
+ printjson(x)
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ host : s._mongos[1].host , parallel : 2 , seconds : 2 } )
+ printjson(x)
+}
+
+doCommand( "test" , { dbstats : 1 } )
+doCommand( "config" , { dbstats : 1 } )
+
+x = s.getDB( "config" ).stats()
+assert( x.ok , tojson(x) )
+printjson(x)
+
+s.stop()
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 7132563..e27316e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -4,9 +4,18 @@ s = new ShardingTest( "shard3" , 2 , 1 , 2 );
s2 = s._mongos[1];
+db = s.getDB( "test" )
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+assert( sh.getBalancerState() , "A1" )
+sh.setBalancerState( false )
+assert( ! sh.getBalancerState() , "A2" )
+sh.setBalancerState( true )
+assert( sh.getBalancerState() , "A3" )
+sh.setBalancerState( false )
+assert( ! sh.getBalancerState() , "A4" )
+
s.config.databases.find().forEach( printjson )
a = s.getDB( "test" ).foo;
@@ -53,6 +62,7 @@ function doCounts( name , total , onlyItCounts ){
var total = doCounts( "before wrong save" )
secondary.save( { num : -3 } );
+printjson( secondary.getDB().getLastError() )
doCounts( "after wrong save" , total , true )
e = a.find().explain();
assert.eq( 3 , e.n , "ex1" )
@@ -127,7 +137,7 @@ print( "*** ready to call dropDatabase" )
res = s.getDB( "test" ).dropDatabase();
assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
// Waiting for SERVER-2253
-// assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
+assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index 70c5ed7..1b58cc7 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -103,4 +103,7 @@ assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
poolStats( "at end" )
print( summary )
+
+assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ) } )
+
s.stop();
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 5d185a5..de3d63e 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -75,7 +75,8 @@ function go() {
return false;
}
return true;
- });
+ }, "Queries took too long to complete correctly.",
+ 2 * 60 * 1000 );
// Done
routerSpec.end()
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
new file mode 100644
index 0000000..e27c054
--- /dev/null
+++ b/jstests/sharding/shard_keycount.js
@@ -0,0 +1,45 @@
+// Tests splitting a chunk twice
+
+s = new ShardingTest( "shard1" , 2, 0, 1, /* chunkSize */1);
+
+dbName = "test"
+collName = "foo"
+ns = dbName + "." + collName
+
+db = s.getDB( dbName );
+
+for(var i = 0; i < 10; i++){
+ db.foo.insert({ _id : i })
+}
+
+// Enable sharding on DB
+s.adminCommand( { enablesharding : dbName } );
+
+// Enable sharding on collection
+s.adminCommand( { shardcollection : ns, key : { _id : 1 } } );
+
+// Kill balancer
+s.config.settings.update({ _id: "balancer" }, { $set : { stopped: true } }, true )
+
+// Split into two chunks
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll = db.getCollection( collName )
+
+// Split chunk again
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll.update({ _id : 3 }, { _id : 3 })
+
+// Split chunk again
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll.update({ _id : 3 }, { _id : 3 })
+
+// Split chunk again
+// FAILS since the key count is based on the full index, not the chunk itself
+// i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
+// in chunk with bounds _id : 0 => 5
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+s.stop();
diff --git a/jstests/sharding/sharding_with_keyfile.js b/jstests/sharding/sharding_with_keyfile.js
new file mode 100644
index 0000000..94aea57
--- /dev/null
+++ b/jstests/sharding/sharding_with_keyfile.js
@@ -0,0 +1,69 @@
+// Tests sharding with a key file
+
+var st = new ShardingTest({ name : jsTestName(),
+ shards : 2,
+ mongos : 1,
+ keyFile : keyFile = "jstests/sharding/" + jsTestName() + ".key" })
+
+// Make sure all our instances got the key
+var configs = st._configDB.split(",")
+for( var i = 0; i < configs.length; i++ ) configs[i] = new Mongo( configs[i] )
+var shards = st._connections
+var mongoses = st._mongos
+
+for( var i = 0; i < configs.length; i++ )
+ assert.eq( configs[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+for( var i = 0; i < shards.length; i++ )
+ assert.eq( shards[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+for( var i = 0; i < mongoses.length; i++ )
+ assert.eq( mongoses[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+var mongos = st.s0
+var coll = mongos.getCollection( "test.foo" )
+
+st.shardColl( coll, { _id : 1 } )
+
+// Create an index so we can find by num later
+coll.ensureIndex({ insert : 1 })
+
+// For more logging
+// mongos.getDB("admin").runCommand({ setParameter : 1, logLevel : 3 })
+
+print( "INSERT!" )
+
+// Insert a bunch of data
+var toInsert = 2000
+for( var i = 0; i < toInsert; i++ ){
+ coll.insert({ my : "test", data : "to", insert : i })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+print( "UPDATE!" )
+
+// Update a bunch of data
+var toUpdate = toInsert
+for( var i = 0; i < toUpdate; i++ ){
+ var id = coll.findOne({ insert : i })._id
+ coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+print( "DELETE" )
+
+// Remove a bunch of data
+var toDelete = toInsert / 2
+for( var i = 0; i < toDelete; i++ ){
+ coll.remove({ insert : i })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+// Make sure the right amount of data is there
+assert.eq( coll.find().count(), toInsert / 2 )
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/sharding_with_keyfile.key b/jstests/sharding/sharding_with_keyfile.key
new file mode 100755
index 0000000..fe3344b
--- /dev/null
+++ b/jstests/sharding/sharding_with_keyfile.key
@@ -0,0 +1,3 @@
+aBcDeFg
+1010101
+JJJJJJJ
\ No newline at end of file
diff --git a/jstests/sharding/sync6.js b/jstests/sharding/sync6.js
new file mode 100644
index 0000000..233534b
--- /dev/null
+++ b/jstests/sharding/sync6.js
@@ -0,0 +1,81 @@
+// Test that distributed lock forcing does not result in inconsistencies, using a
+// fast timeout.
+
+// Note that this test will always have random factors, since we can't control the
+// thread scheduling.
+
+test = new SyncCCTest( "sync6", { logpath : "/dev/null" } )
+
+// Startup another process to handle our commands to the cluster, mostly so it's
+// easier to read.
+var commandConn = startMongodTest( 30000 + 4, "syncCommander", false, {})//{ logpath : "/dev/null" } )//{verbose : ""} )
+// { logpath : "/data/db/syncCommander/mongod.log" } );
+
+// Up the log level for this test
+commandConn.getDB( "admin" ).runCommand( { setParameter : 1, logLevel : 1 } )
+
+// Have lots of threads, so use larger i
+// Can't test too many, we get socket exceptions... possibly due to the
+// javascript console.
+for ( var i = 8; i < 9; i++ ) {
+
+ // Our force time is 4 seconds
+ // Slower machines can't keep up the LockPinger rate, which can lead to lock failures
+ // since our locks are only valid if the LockPinger pings faster than the force time.
+ // Actual lock timeout is 15 minutes, so a few seconds is extremely aggressive
+ var takeoverMS = 4000;
+
+ // Generate valid sleep and skew for this timeout
+ var threadSleepWithLock = takeoverMS / 2;
+ var configServerTimeSkew = [ 0, 0, 0 ]
+ for ( var h = 0; h < 3; h++ ) {
+ // Skew by 1/30th the takeover time either way, at max
+ configServerTimeSkew[h] = ( i + h ) % Math.floor( takeoverMS / 60 )
+ // Make skew pos or neg
+ configServerTimeSkew[h] *= ( ( i + h ) % 2 ) ? -1 : 1;
+ }
+
+ // Build command
+ command = { _testDistLockWithSkew : 1 }
+
+ // Basic test parameters
+ command["lockName"] = "TimeSkewFailNewTest_lock_" + i;
+ command["host"] = test.url
+ command["seed"] = i
+ command["numThreads"] = ( i % 50 ) + 1
+
+ // Critical values so we're sure of correct operation
+ command["takeoverMS"] = takeoverMS
+ command["wait"] = 4 * takeoverMS // so we must force the lock
+ command["skewHosts"] = configServerTimeSkew
+ command["threadWait"] = threadSleepWithLock
+
+ // Less critical test params
+
+ // 1/3 of threads will not release the lock
+ command["hangThreads"] = 3
+ // Amount of time to wait before trying lock again
+ command["threadSleep"] = 1;// ( ( i + 1 ) * 100 ) % (takeoverMS / 4)
+ // Amount of total clock skew possible between locking threads (processes)
+ // This can be large now.
+ command["skewRange"] = ( command["takeoverMS"] * 3 ) * 60 * 1000
+
+ // Double-check our sleep, host skew, and takeoverMS values again
+
+ // At maximum, our threads must sleep only half the lock timeout time.
+ assert( command["threadWait"] <= command["takeoverMS"] / 2 )
+ for ( var h = 0; h < command["skewHosts"].length; h++ ) {
+ // At maximum, our config server time skew needs to be less than 1/30th
+ // the total time skew (1/60th either way).
+ assert( Math.abs( command["skewHosts"][h] ) <= ( command["takeoverMS"] / 60 ) )
+ }
+
+ result = commandConn.getDB( "admin" ).runCommand( command )
+ printjson( result )
+ printjson( command )
+ assert( result.ok, "Skewed threads did not increment correctly." );
+
+}
+
+stopMongoProgram( 30004 )
+test.stop();
diff --git a/jstests/sharding/sync7.js b/jstests/sharding/sync7.js
new file mode 100644
index 0000000..a8ff094
--- /dev/null
+++ b/jstests/sharding/sync7.js
@@ -0,0 +1,63 @@
+// Test that the clock skew of the distributed lock disallows getting locks for moving and splitting.
+
+s = new ShardingTest( "moveDistLock", 3, 0, undefined, { sync : true } );
+
+s._connections[0].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : 15000 } )
+s._connections[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : -16000 } )
+
+// We need to start another mongos after skewing the clock, since the first mongos will have already
+// tested the config servers (via the balancer) before we manually skewed them
+otherMongos = startMongos( { port : 30020, v : 0, configdb : s._configDB } );
+
+// Initialize DB data
+initDB = function(name) {
+ var db = s.getDB( name );
+ var c = db.foo;
+ c.save( { a : 1 } );
+ c.save( { a : 2 } );
+ c.save( { a : 3 } );
+ assert( 3, c.count() );
+
+ return s.getServer( name );
+}
+
+from = initDB( "test1" );
+to = s.getAnother( from );
+
+s.printShardingStatus();
+
+// Make sure we can't move when our clock skew is so high
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : to.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 0, "Move command should not have succeeded!" )
+
+// Enable sharding on DB and collection
+result = otherMongos.getDB("admin").runCommand( { enablesharding : "test1" } );
+result = otherMongos.getDB("test1").foo.ensureIndex( { a : 1 } );
+result = otherMongos.getDB("admin").runCommand( { shardcollection : "test1.foo", key : { a : 1 } } );
+print(" Collection Sharded! ")
+
+// Make sure we can't split when our clock skew is so high
+result = otherMongos.getDB( "admin" ).runCommand( { split : "test1.foo", find : { a : 2 } } );
+assert.eq( result.ok, 0, "Split command should not have succeeded!")
+
+// Adjust clock back in bounds
+s._connections[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : 0 } )
+print(" Clock adjusted back to in-bounds. ");
+
+// Make sure we can now split
+result = otherMongos.getDB( "admin" ).runCommand( { split : "test1.foo", find : { a : 2 } } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Split command should have succeeded!")
+
+// Make sure we can now move
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : to.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Move command should have succeeded!" )
+
+// Make sure we can now move again (getting the lock twice)
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : from.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Move command should have succeeded again!" )
+
+s.stop();