author    Antonin Kral <a.kral@bobek.cz>  2010-01-31 08:32:52 +0100
committer Antonin Kral <a.kral@bobek.cz>  2010-01-31 08:32:52 +0100
commit    4eefaf421bfeddf040d96a3dafb12e09673423d7 (patch)
tree      cb2e5ccc7f98158894f977ff131949da36673591 /jstests/sharding
download  mongodb-4eefaf421bfeddf040d96a3dafb12e09673423d7.tar.gz
Imported Upstream version 1.3.1
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/auto1.js          51
-rw-r--r--  jstests/sharding/auto2.js          44
-rw-r--r--  jstests/sharding/count1.js         55
-rw-r--r--  jstests/sharding/diffservers1.js   20
-rw-r--r--  jstests/sharding/error1.js         47
-rw-r--r--  jstests/sharding/features1.js     139
-rw-r--r--  jstests/sharding/features2.js     114
-rw-r--r--  jstests/sharding/key_many.js      121
-rw-r--r--  jstests/sharding/key_string.js     44
-rw-r--r--  jstests/sharding/movePrimary1.js   31
-rw-r--r--  jstests/sharding/moveshard1.js     39
-rw-r--r--  jstests/sharding/passthrough1.js   10
-rw-r--r--  jstests/sharding/shard1.js         32
-rw-r--r--  jstests/sharding/shard2.js        194
-rw-r--r--  jstests/sharding/shard3.js        130
-rw-r--r--  jstests/sharding/shard4.js         49
-rw-r--r--  jstests/sharding/shard5.js         52
-rw-r--r--  jstests/sharding/shard6.js         39
-rw-r--r--  jstests/sharding/splitpick.js      33
-rw-r--r--  jstests/sharding/update1.js        33
-rw-r--r--  jstests/sharding/version1.js       23
-rw-r--r--  jstests/sharding/version2.js       36
22 files changed, 1336 insertions(+), 0 deletions(-)
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
new file mode 100644
index 0000000..92a4ce8
--- /dev/null
+++ b/jstests/sharding/auto1.js
@@ -0,0 +1,51 @@
+// auto1.js
+
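+// the shell's ShardingTest helper at this version appears to take
+// ( testName , numShards , verboseLevel , numMongos ), so this is
+// 2 shards, verbosity 1, 1 mongos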
+s = new ShardingTest( "auto1" , 2 , 1 , 1 );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
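+// pad each doc with a ~50KB string so relatively few inserts add up to
+// enough data to force auto-splitting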
+bigString = "";
+while ( bigString.length < 1024 * 50 )
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+
+db = s.getDB( "test" )
+coll = db.foo;
+
+var i=0;
+
+for ( ; i<500; i++ ){
+ coll.save( { num : i , s : bigString } );
+}
+
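+// connpoolsync should flush writes buffered on pooled shard connections,
+// so the direct per-shard counts below see everything just inserted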
+s.adminCommand( "connpoolsync" );
+
+primary = s.getServer( "test" ).getDB( "test" );
+
+assert.eq( 1 , s.config.chunks.count() );
+assert.eq( 500 , primary.foo.count() );
+
+print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
+
+for ( ; i<800; i++ ){
+ coll.save( { num : i , s : bigString } );
+}
+
+assert.eq( 1 , s.config.chunks.count() );
+
+for ( ; i<1500; i++ ){
+ coll.save( { num : i , s : bigString } );
+}
+
+assert.eq( 3 , s.config.chunks.count() , "shard didn't split A " );
+s.printChunks();
+
+for ( ; i<3000; i++ ){
+ coll.save( { num : i , s : bigString } );
+}
+
+assert.eq( 4 , s.config.chunks.count() , "shard didn't split B " );
+s.printChunks();
+
+
+s.stop();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
new file mode 100644
index 0000000..c6ec374
--- /dev/null
+++ b/jstests/sharding/auto2.js
@@ -0,0 +1,44 @@
+// auto2.js
+
+s = new ShardingTest( "auto2" , 2 , 1 , 1 );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+bigString = "";
+while ( bigString.length < 1024 * 50 )
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+
+db = s.getDB( "test" )
+coll = db.foo;
+
+var i=0;
+
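+// insert 3000 docs in 100-doc batches; Date.timeFunc runs the function
+// and returns the elapsed milliseconds for each batch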
+for ( j=0; j<30; j++ ){
+ print( "j:" + j + " : " +
+ Date.timeFunc(
+ function(){
+ for ( var k=0; k<100; k++ ){
+ coll.save( { num : i , s : bigString } );
+ i++;
+ }
+ }
+ ) );
+
+}
+s.adminCommand( "connpoolsync" );
+
+print( "done inserting data" );
+
+print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
+s.printChunks();
+
+counta = s._connections[0].getDB( "test" ).foo.count();
+countb = s._connections[1].getDB( "test" ).foo.count();
+
+assert.eq( j * 100 , counta + countb , "from each a:" + counta + " b:" + countb + " i:" + i );
+assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
+
+assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
+
+s.stop();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
new file mode 100644
index 0000000..a697162
--- /dev/null
+++ b/jstests/sharding/count1.js
@@ -0,0 +1,55 @@
+// count1.js
+
+s = new ShardingTest( "count1" , 2 );
+
+db = s.getDB( "test" );
+
+db.bar.save( { n : 1 } )
+db.bar.save( { n : 2 } )
+db.bar.save( { n : 3 } )
+
+assert.eq( 3 , db.bar.find().count() , "bar 1" );
+assert.eq( 1 , db.bar.find( { n : 1 } ).count() , "bar 2" );
+
+s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
+
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
+
+db.foo.save( { name : "eliot" } )
+db.foo.save( { name : "sara" } )
+db.foo.save( { name : "bob" } )
+db.foo.save( { name : "joe" } )
+db.foo.save( { name : "mark" } )
+db.foo.save( { name : "allan" } )
+
+assert.eq( 6 , db.foo.find().count() , "basic count" );
+
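+// split with "find" picks a point inside whichever chunk currently holds
+// { name : "joe" }, so splitting three times keeps subdividing around that key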
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+
+assert.eq( 6 , db.foo.find().count() , "basic count after split " );
+assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
+
+s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : secondary.getMongo().name } );
+
+assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
+assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
+assert.eq( 3 , primary.foo.find().sort( { name : 1 } ).toArray().length , "primary count sorted" );
+assert.eq( 3 , secondary.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
+
+assert.eq( 6 , db.foo.find().toArray().length , "total count after move" );
+assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count() sorted" );
+
+assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count() after move" );
+
+assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
+assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
+
+s.stop();
+
+
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
new file mode 100644
index 0000000..6497bc0
--- /dev/null
+++ b/jstests/sharding/diffservers1.js
@@ -0,0 +1,20 @@
+
+
+s = new ShardingTest( "diffservers1" , 2 );
+
+assert.eq( 2 , s.config.shards.count() , "server count wrong" );
+assert.eq( 2 , s._connections[0].getDB( "config" ).shards.count() , "where are servers!" );
+assert.eq( 0 , s._connections[1].getDB( "config" ).shards.count() , "shouldn't be here" );
+
+test1 = s.getDB( "test1" ).foo;
+test1.save( { a : 1 } );
+test1.save( { a : 2 } );
+test1.save( { a : 3 } );
+assert.eq( 3 , test1.count() );
+
+assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
+assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" } ).ok , "host not up" );
+assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" , allowLocal : true } ).ok , "host not up" );
+
+s.stop();
+
diff --git a/jstests/sharding/error1.js b/jstests/sharding/error1.js
new file mode 100644
index 0000000..b4db9c3
--- /dev/null
+++ b/jstests/sharding/error1.js
@@ -0,0 +1,47 @@
+
+s = new ShardingTest( "error1" , 2 , 1 , 1 );
+s.adminCommand( { enablesharding : "test" } );
+
+a = s._connections[0].getDB( "test" );
+b = s._connections[1].getDB( "test" );
+
+// ---- simple getLastError ----
+
+db = s.getDB( "test" );
+db.foo.insert( { _id : 1 } );
+assert.isnull( db.getLastError() , "gle 1" );
+db.foo.insert( { _id : 1 } );
+assert( db.getLastError() , "gle21" );
+assert( db.getLastError() , "gle22" );
+
+// --- sharded getlasterror
+
+s.adminCommand( { shardcollection : "test.foo2" , key : { num : 1 } } );
+
+db.foo2.insert( { _id : 1 , num : 5 } );
+db.foo2.insert( { _id : 2 , num : 10 } );
+db.foo2.insert( { _id : 3 , num : 15 } );
+db.foo2.insert( { _id : 4 , num : 20 } );
+
+s.adminCommand( { split : "test.foo2" , middle : { num : 10 } } );
+s.adminCommand( { movechunk : "test.foo2" , find : { num : 20 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+assert( a.foo2.count() > 0 && a.foo2.count() < 4 , "se1" );
+assert( b.foo2.count() > 0 && b.foo2.count() < 4 , "se2" );
+assert.eq( 4 , db.foo2.count() , "se3" );
+
+db.foo2.insert( { _id : 5 , num : 25 } );
+assert( ! db.getLastError() , "se3.5" );
+s.sync();
+assert.eq( 5 , db.foo2.count() , "se4" );
+
+
+
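+// a second doc with _id 5: the duplicate key error raised on the shard
+// must still come back through the sharded getLastError path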
+db.foo2.insert( { _id : 5 , num : 30 } );
+assert( db.getLastError() , "se5" );
+assert( db.getLastError() , "se6" );
+
+assert.eq( 5 , db.foo2.count() , "se7" );
+
+// ----
+s.stop();
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
new file mode 100644
index 0000000..d2f692a
--- /dev/null
+++ b/jstests/sharding/features1.js
@@ -0,0 +1,139 @@
+// features1.js
+
+s = new ShardingTest( "features1" , 2 , 1 , 1 );
+
+s.adminCommand( { enablesharding : "test" } );
+
+// ---- can't shard system namespaces ----
+
+assert( ! s.admin.runCommand( { shardcollection : "test.system.blah" , key : { num : 1 } } ).ok , "shard system namespace" );
+
+// ---- setup test.foo -----
+
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+db = s.getDB( "test" );
+
+a = s._connections[0].getDB( "test" );
+b = s._connections[1].getDB( "test" );
+
+db.foo.ensureIndex( { y : 1 } );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 10 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 20 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+db.foo.save( { num : 5 } );
+db.foo.save( { num : 15 } );
+
+s.sync();
+
+// ---- make sure shard key index is everywhere ----
+
+assert.eq( 3 , a.foo.getIndexKeys().length , "a index 1" );
+assert.eq( 3 , b.foo.getIndexKeys().length , "b index 1" );
+
+// ---- make sure if you add an index it goes everywhere ------
+
+db.foo.ensureIndex( { x : 1 } );
+
+s.sync();
+
+assert.eq( 4 , a.foo.getIndexKeys().length , "a index 2" );
+assert.eq( 4 , b.foo.getIndexKeys().length , "b index 2" );
+
+// ---- no unique indexes ------
+
+db.foo.ensureIndex( { z : 1 } , true );
+
+s.sync();
+
+assert.eq( 4 , a.foo.getIndexKeys().length , "a index 3" );
+assert.eq( 4 , b.foo.getIndexKeys().length , "b index 3" );
+
+// ---- can't shard collections with unique indexes
+
+db.foo2.ensureIndex( { a : 1 } );
+s.sync();
+assert( s.admin.runCommand( { shardcollection : "test.foo2" , key : { num : 1 } } ).ok , "shard with index" );
+
+db.foo3.ensureIndex( { a : 1 } , true );
+s.sync();
+printjson( db.system.indexes.find( { ns : "test.foo3" } ).toArray() );
+assert( ! s.admin.runCommand( { shardcollection : "test.foo3" , key : { num : 1 } } ).ok , "shard with unique index" );
+
+// ----- eval -----
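+// db.eval runs server-side on the primary shard, so it should only work
+// against collections that aren't sharded -- foo3 is unsharded here, foo2 is sharded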
+
+db.foo2.save( { num : 5 , a : 7 } );
+db.foo3.save( { num : 5 , a : 8 } );
+
+assert.eq( 1 , db.foo3.count() , "eval pre1" );
+assert.eq( 1 , db.foo2.count() , "eval pre2" );
+
+assert.eq( 8 , db.eval( function(){ return db.foo3.findOne().a; } ), "eval 1 " );
+assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ) } , "eval 2" )
+
+assert.eq( 1 , db.eval( function(){ return db.foo3.count(); } ), "eval 3 " );
+assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ) } , "eval 4" )
+
+
+// ---- unique shard key ----
+
+assert( s.admin.runCommand( { shardcollection : "test.foo4" , key : { num : 1 } , unique : true } ).ok , "shard with index and unique" );
+s.adminCommand( { split : "test.foo4" , middle : { num : 10 } } );
+s.adminCommand( { movechunk : "test.foo4" , find : { num : 20 } , to : s.getOther( s.getServer( "test" ) ).name } );
+db.foo4.save( { num : 5 } );
+db.foo4.save( { num : 15 } );
+s.sync();
+assert.eq( 1 , a.foo4.count() , "ua1" );
+assert.eq( 1 , b.foo4.count() , "ub1" );
+
+assert.eq( 2 , a.foo4.getIndexes().length , "ua2" );
+assert.eq( 2 , b.foo4.getIndexes().length , "ub2" );
+
+assert( a.foo4.getIndexes()[1].unique , "ua3" );
+assert( b.foo4.getIndexes()[1].unique , "ub3" );
+
+// --- don't let you convertToCapped ----
+assert( ! db.foo4.isCapped() , "ca1" );
+assert( ! a.foo4.isCapped() , "ca2" );
+assert( ! b.foo4.isCapped() , "ca3" );
+assert( ! db.foo4.convertToCapped( 30000 ).ok , "ca30" );
+assert( ! db.foo4.isCapped() , "ca4" );
+assert( ! a.foo4.isCapped() , "ca5" );
+assert( ! b.foo4.isCapped() , "ca6" );
+
+// make sure i didn't break anything
+db.foo4a.save( { a : 1 } );
+assert( ! db.foo4a.isCapped() , "ca7" );
+db.foo4a.convertToCapped( 30000 );
+assert( db.foo4a.isCapped() , "ca8" );
+
+// --- don't let you shard a capped collection
+
+db.createCollection("foo5", {capped:true, size:30000});
+assert( db.foo5.isCapped() , "cb1" );
+assert( ! s.admin.runCommand( { shardcollection : "test.foo5" , key : { num : 1 } } ).ok , "shard capped" );
+
+
+// ----- group ----
+
+db.foo6.save( { a : 1 } );
+db.foo6.save( { a : 3 } );
+db.foo6.save( { a : 3 } );
+s.sync();
+
+assert.eq( 2 , db.foo6.group( { key : { a : 1 } , initial : { count : 0 } ,
+ reduce : function(z,prev){ prev.count++; } } ).length );
+
+assert.eq( 3 , db.foo6.find().count() );
+assert( s.admin.runCommand( { shardcollection : "test.foo6" , key : { a : 2 } } ).ok );
+assert.eq( 3 , db.foo6.find().count() );
+
+s.adminCommand( { split : "test.foo6" , middle : { a : 2 } } );
+s.adminCommand( { movechunk : "test.foo6" , find : { a : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count : 0 } , reduce : function(z,prev){ prev.count++; } } ); } );
+
+
+s.stop()
+
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
new file mode 100644
index 0000000..47fedc8
--- /dev/null
+++ b/jstests/sharding/features2.js
@@ -0,0 +1,114 @@
+// features2.js
+
+s = new ShardingTest( "features2" , 2 , 1 , 1 );
+s.adminCommand( { enablesharding : "test" } );
+
+a = s._connections[0].getDB( "test" );
+b = s._connections[1].getDB( "test" );
+
+db = s.getDB( "test" );
+
+// ---- distinct ----
+
+db.foo.save( { x : 1 } );
+db.foo.save( { x : 2 } );
+db.foo.save( { x : 3 } );
+
+assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 1" );
+assert( a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3 , "distinct 2" );
+assert( a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0 , "distinct 3" );
+
+assert.eq( 1 , s.onNumShards( "foo" ) , "A1" );
+
+s.shardGo( "foo" , { x : 1 } , { x : 2 } , { x : 3 } );
+
+assert.eq( 2 , s.onNumShards( "foo" ) , "A2" );
+
+assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 4" );
+
+// ----- delete ---
+
+assert.eq( 3 , db.foo.count() , "D1" );
+
+db.foo.remove( { x : 3 } );
+assert.eq( 2 , db.foo.count() , "D2" );
+
+db.foo.save( { x : 3 } );
+assert.eq( 3 , db.foo.count() , "D3" );
+
+db.foo.remove( { x : { $gt : 2 } } );
+assert.eq( 2 , db.foo.count() , "D4" );
+
+db.foo.remove( { x : { $gt : -1 } } );
+assert.eq( 0 , db.foo.count() , "D5" );
+
+db.foo.save( { x : 1 } );
+db.foo.save( { x : 2 } );
+db.foo.save( { x : 3 } );
+assert.eq( 3 , db.foo.count() , "D6" );
+db.foo.remove( {} );
+assert.eq( 0 , db.foo.count() , "D7" );
+
+// --- _id key ---
+
+db.foo2.insert( { _id : new ObjectId() } );
+db.foo2.insert( { _id : new ObjectId() } );
+db.foo2.insert( { _id : new ObjectId() } );
+
+assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
+
+s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } );
+
+assert.eq( 3 , db.foo2.count() , "F2" )
+db.foo2.insert( {} );
+assert.eq( 4 , db.foo2.count() , "F3" )
+
+
+// --- map/reduce
+
+db.mr.save( { x : 1 , tags : [ "a" , "b" ] } );
+db.mr.save( { x : 2 , tags : [ "b" , "c" ] } );
+db.mr.save( { x : 3 , tags : [ "c" , "a" ] } );
+db.mr.save( { x : 4 , tags : [ "b" , "c" ] } );
+
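+// across the four docs the tags tally to a:2, b:3, c:3 -- doMR below is run
+// both before and after sharding and should see exactly those three keys and counts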
+m = function(){
+ this.tags.forEach(
+ function(z){
+ emit( z , { count : 1 } );
+ }
+ );
+};
+
+r = function( key , values ){
+ var total = 0;
+ for ( var i=0; i<values.length; i++ ){
+ total += values[i].count;
+ }
+ return { count : total };
+};
+
+doMR = function( n ){
+ var res = db.mr.mapReduce( m , r );
+ printjson( res );
+ var x = db[res.result];
+ assert.eq( 3 , x.find().count() , "MR T1 " + n );
+
+ var z = {};
+ x.find().forEach( function(a){ z[a._id] = a.value.count; } );
+ assert.eq( 3 , Object.keySet( z ).length , "MR T2 " + n );
+ assert.eq( 2 , z.a , "MR T3 " + n );
+ assert.eq( 3 , z.b , "MR T4 " + n );
+ assert.eq( 3 , z.c , "MR T5 " + n );
+
+ x.drop();
+}
+
+doMR( "before" );
+
+assert.eq( 1 , s.onNumShards( "mr" ) , "E1" );
+s.shardGo( "mr" , { x : 1 } , { x : 2 } , { x : 3 } );
+assert.eq( 2 , s.onNumShards( "mr" ) , "E2" );
+
+doMR( "after" );
+
+s.stop();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
new file mode 100644
index 0000000..43e7cc5
--- /dev/null
+++ b/jstests/sharding/key_many.js
@@ -0,0 +1,121 @@
+// key_many.js
+
+// values have to be sorted
+types =
+ [ { name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } ,
+ { name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } ,
+ { name : "string_id" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "_id" },
+ { name : "embedded" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } ,
+ { name : "embedded 2" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b.c" } ,
+ { name : "object" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" } ,
+ ]
+
+s = new ShardingTest( "key_many" , 2 );
+
+s.adminCommand( { enablesharding : "test" } )
+db = s.getDB( "test" );
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
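+// two helpers for a (possibly dotted) shard key field: for keyfield "a.b",
+// makeObjectDotted(5) gives { "a.b" : 5 } (the command form) while
+// makeObject(5) gives { a : { b : 5 } } (the document form)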
+function makeObjectDotted( v ){
+ var o = {};
+ o[curT.keyfield] = v;
+ return o;
+}
+
+function makeObject( v ){
+ var o = {};
+ var p = o;
+
+ var keys = curT.keyfield.split('.');
+ for(var i=0; i<keys.length-1; i++){
+ p[keys[i]] = {};
+ p = p[keys[i]];
+ }
+
+ p[keys[i]] = v;
+
+ return o;
+}
+
+function getKey( o ){
+ var keys = curT.keyfield.split('.');
+ for(var i=0; i<keys.length; i++){
+ o = o[keys[i]];
+ }
+ return o;
+}
+
+
+
+for ( var i=0; i<types.length; i++ ){
+ curT = types[i]; //global
+
+ print("\n\n#### Now Testing " + curT.name + " ####\n\n");
+
+ var shortName = "foo_" + curT.name;
+ var longName = "test." + shortName;
+
+ var c = db[shortName];
+ s.adminCommand( { shardcollection : longName , key : makeObjectDotted( 1 ) } );
+
+ assert.eq( 1 , s.config.chunks.find( { ns : longName } ).count() , curT.name + " sanity check A" );
+
+ var unsorted = Array.shuffle( Object.extend( [] , curT.values ) );
+ c.insert( makeObject( unsorted[0] ) );
+ for ( var x=1; x<unsorted.length; x++ )
+ c.save( makeObject( unsorted[x] ) );
+
+ assert.eq( 6 , c.find().count() , curT.name + " basic count" );
+
+ s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
+ s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
+ s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
+
+ s.adminCommand( { movechunk : longName , find : makeObjectDotted( curT.values[3] ) , to : secondary.getMongo().name } );
+
+ s.printChunks();
+
+ assert.eq( 3 , primary[shortName].find().toArray().length , curT.name + " primary count" );
+ assert.eq( 3 , secondary[shortName].find().toArray().length , curT.name + " secondary count" );
+
+ assert.eq( 6 , c.find().toArray().length , curT.name + " total count" );
+ assert.eq( 6 , c.find().sort( makeObjectDotted( 1 ) ).toArray().length , curT.name + " total count sorted" );
+
+ assert.eq( 6 , c.find().sort( makeObjectDotted( 1 ) ).count() , curT.name + " total count with count()" );
+
+ assert.eq( curT.values , c.find().sort( makeObjectDotted( 1 ) ).toArray().map( getKey ) , curT.name + " sort 1" );
+ assert.eq( curT.values.reverse() , c.find().sort( makeObjectDotted( -1 ) ).toArray().map( getKey ) , curT.name + " sort 2" );
+
+
+ assert.eq( 0 , c.find( { xx : 17 } ).sort( { zz : 1 } ).count() , curT.name + " xx 0a " );
+ assert.eq( 0 , c.find( { xx : 17 } ).sort( makeObjectDotted( 1 ) ).count() , curT.name + " xx 0b " );
+ assert.eq( 0 , c.find( { xx : 17 } ).count() , curT.name + " xx 0c " );
+ assert.eq( 0 , c.find( { xx : { $exists : true } } ).count() , curT.name + " xx 1 " );
+
+ c.update( makeObjectDotted( curT.values[3] ) , { $set : { xx : 17 } } );
+ assert.eq( 1 , c.find( { xx : { $exists : true } } ).count() , curT.name + " xx 2 " );
+ assert.eq( curT.values[3] , getKey( c.findOne( { xx : 17 } ) ) , curT.name + " xx 3 " );
+
+ c.update( makeObjectDotted( curT.values[3] ) , { $set : { xx : 17 } } , {upsert: true});
+ assert.eq( null , db.getLastError() , curT.name + " upserts should work if they include the shard key in the query" );
+
+ c.ensureIndex( { _id : 1 } , { unique : true } );
+ assert.eq( null , db.getLastError() , curT.name + " creating _id index should be ok" );
+
+ // multi update
+ var mysum = 0;
+ c.find().forEach( function(z){ mysum += z.xx || 0; } );
+ assert.eq( 17 , mysum, curT.name + " multi update pre" );
+ c.update( {} , { $inc : { xx : 1 } } , false , true );
+ var mysum = 0;
+ c.find().forEach( function(z){ mysum += z.xx || 0; } );
+ assert.eq( 23 , mysum, curT.name + " multi update" );
+
+ // TODO remove
+}
+
+
+s.stop();
+
+
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
new file mode 100644
index 0000000..8ee1c70
--- /dev/null
+++ b/jstests/sharding/key_string.js
@@ -0,0 +1,44 @@
+// key_string.js
+
+s = new ShardingTest( "keystring" , 2 );
+
+db = s.getDB( "test" );
+s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
+
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
+
+db.foo.save( { name : "eliot" } )
+db.foo.save( { name : "sara" } )
+db.foo.save( { name : "bob" } )
+db.foo.save( { name : "joe" } )
+db.foo.save( { name : "mark" } )
+db.foo.save( { name : "allan" } )
+
+assert.eq( 6 , db.foo.find().count() , "basic count" );
+
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+
+s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : secondary.getMongo().name } );
+
+s.printChunks();
+
+assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
+assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
+
+assert.eq( 6 , db.foo.find().toArray().length , "total count" );
+assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count sorted" );
+
+assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count()" );
+
+assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
+assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
+
+s.stop();
+
+
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
new file mode 100644
index 0000000..20dc6c1
--- /dev/null
+++ b/jstests/sharding/movePrimary1.js
@@ -0,0 +1,31 @@
+
+
+s = new ShardingTest( "movePrimary1" , 2 );
+
+initDB = function( name ){
+ var db = s.getDB( name );
+ var c = db.foo;
+ c.save( { a : 1 } );
+ c.save( { a : 2 } );
+ c.save( { a : 3 } );
+ assert.eq( 3 , c.count() );
+
+ return s.getServer( name );
+}
+
+from = initDB( "test1" );
+to = s.getOther( from );
+
+assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data before move" );
+assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data before move" );
+
+assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , from.name , "not in db correctly to start" );
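+// moveprimary should switch the db's primary shard in the config db and
+// migrate its (unsharded) data over, which the counts below verify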
+s.admin.runCommand( { moveprimary : "test1" , to : to.name } );
+assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , to.name , "to in config db didn't change" );
+
+
+assert.eq( 0 , from.getDB( "test1" ).foo.count() , "from still has data after move" );
+assert.eq( 3 , to.getDB( "test1" ).foo.count() , "to doesn't have data after move" );
+
+s.stop();
+
diff --git a/jstests/sharding/moveshard1.js b/jstests/sharding/moveshard1.js
new file mode 100644
index 0000000..b074b4c
--- /dev/null
+++ b/jstests/sharding/moveshard1.js
@@ -0,0 +1,39 @@
+// moveshard1.js
+
+s = new ShardingTest( "movechunk1" , 2 );
+
+l = s._connections[0];
+r = s._connections[1];
+
+ldb = l.getDB( "foo" );
+rdb = r.getDB( "foo" );
+
+ldb.things.save( { a : 1 } )
+ldb.things.save( { a : 2 } )
+ldb.things.save( { a : 3 } )
+
+assert.eq( ldb.things.count() , 3 );
+assert.eq( rdb.things.count() , 0 );
+
+startResult = l.getDB( "admin" ).runCommand( { "movechunk.start" : "foo.things" ,
+ "to" : s._serverNames[1] ,
+ "from" : s._serverNames[0] ,
+ filter : { a : { $gt : 2 } }
+ } );
+print( "movechunk.start: " + tojson( startResult ) );
+assert( startResult.ok == 1 , "start failed!" );
+
+finishResult = l.getDB( "admin" ).runCommand( { "movechunk.finish" : "foo.things" ,
+ finishToken : startResult.finishToken ,
+ to : s._serverNames[1] ,
+ newVersion : 1 } );
+print( "movechunk.finish: " + tojson( finishResult ) );
+assert( finishResult.ok == 1 , "finishResult failed!" );
+
+assert.eq( rdb.things.count() , 1 , "right has wrong size after move" );
+assert.eq( ldb.things.count() , 2 , "left has wrong size after move" );
+
+
+s.stop();
+
+
diff --git a/jstests/sharding/passthrough1.js b/jstests/sharding/passthrough1.js
new file mode 100644
index 0000000..d5df0d2
--- /dev/null
+++ b/jstests/sharding/passthrough1.js
@@ -0,0 +1,10 @@
+
+s = new ShardingTest( "passthrough1" , 2 )
+
+db = s.getDB( "test" );
+db.foo.insert( { num : 1 , name : "eliot" } );
+db.foo.insert( { num : 2 , name : "sara" } );
+db.foo.insert( { num : -1 , name : "joe" } );
+assert.eq( 3 , db.foo.find().length() );
+
+s.stop();
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
new file mode 100644
index 0000000..bbe1144
--- /dev/null
+++ b/jstests/sharding/shard1.js
@@ -0,0 +1,32 @@
+/**
+* this tests some of the ground work
+*/
+
+s = new ShardingTest( "shard1" , 2 );
+
+db = s.getDB( "test" );
+db.foo.insert( { num : 1 , name : "eliot" } );
+db.foo.insert( { num : 2 , name : "sara" } );
+db.foo.insert( { num : -1 , name : "joe" } );
+assert.eq( 3 , db.foo.find().length() );
+
+shardCommand = { shardcollection : "test.foo" , key : { num : 1 } };
+
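+// sharding a collection before enablesharding has been run on its db should fail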
+assert.throws( function(){ s.adminCommand( shardCommand ); } );
+
+s.adminCommand( { enablesharding : "test" } );
+assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
+
+s.adminCommand( shardCommand );
+dbconfig = s.config.databases.findOne( { name : "test" } );
+assert.eq( dbconfig.sharded["test.foo"] , { key : { num : 1 } , unique : false } , "Sharded content" );
+
+assert.eq( 1 , s.config.chunks.count() );
+si = s.config.chunks.findOne();
+assert( si );
+assert.eq( si.ns , "test.foo" );
+
+assert.eq( 3 , db.foo.find().length() , "after sharding, no split count failed" );
+
+
+s.stop();
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
new file mode 100644
index 0000000..566a0db
--- /dev/null
+++ b/jstests/sharding/shard2.js
@@ -0,0 +1,194 @@
+// shard2.js
+
+/**
+* test basic sharding
+*/
+
+placeCheck = function( num ){
+ print("shard2 step: " + num );
+}
+
+s = new ShardingTest( "shard2" , 2 , 6 );
+
+db = s.getDB( "test" );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+assert.eq( 1 , s.config.chunks.count() , "sanity check 1" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 0 } } );
+assert.eq( 2 , s.config.chunks.count() , "should be 2 shards" );
+chunks = s.config.chunks.find().toArray();
+assert.eq( chunks[0].shard , chunks[1].shard , "server should be the same after a split" );
+
+
+db.foo.save( { num : 1 , name : "eliot" } );
+db.foo.save( { num : 2 , name : "sara" } );
+db.foo.save( { num : -1 , name : "joe" } );
+
+s.adminCommand( "connpoolsync" );
+
+assert.eq( 3 , s.getServer( "test" ).getDB( "test" ).foo.find().length() , "not right directly to db A" );
+assert.eq( 3 , db.foo.find().length() , "not right on shard" );
+
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+assert.eq( 3 , primary.foo.find().length() , "primary wrong B" );
+assert.eq( 0 , secondary.foo.find().length() , "secondary wrong C" );
+assert.eq( 3 , db.foo.find().sort( { num : 1 } ).length() );
+
+placeCheck( 2 );
+
+// NOTE: at this point we have 2 shards on 1 server
+
+// test moving a chunk
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : primary.getMongo().name } ); } );
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : "adasd" } ) } );
+
+s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : secondary.getMongo().name } );
+assert.eq( 2 , secondary.foo.find().length() , "secondary should have 2 after move shard" );
+assert.eq( 1 , primary.foo.find().length() , "primary should only have 1 after move shard" );
+
+assert.eq( 2 , s.config.chunks.count() , "still should have 2 shards after move not:" + s.getChunksString() );
+chunks = s.config.chunks.find().toArray();
+assert.neq( chunks[0].shard , chunks[1].shard , "servers should NOT be the same after the move" );
+
+placeCheck( 3 );
+
+// test inserts go to right server/shard
+
+db.foo.save( { num : 3 , name : "bob" } );
+s.adminCommand( "connpoolsync" );
+assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
+assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
+
+db.foo.save( { num : -2 , name : "funny man" } );
+s.adminCommand( "connpoolsync" );
+assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
+assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
+
+
+db.foo.save( { num : 0 , name : "funny guy" } );
+s.adminCommand( "connpoolsync" );
+assert.eq( 2 , primary.foo.find().length() , "boundary A" );
+assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
+
+placeCheck( 4 );
+
+// findOne
+assert.eq( "eliot" , db.foo.findOne( { num : 1 } ).name );
+assert.eq( "funny man" , db.foo.findOne( { num : -2 } ).name );
+
+// getAll
+function sumQuery( c ){
+ var sum = 0;
+ c.toArray().forEach(
+ function(z){
+ sum += z.num;
+ }
+ );
+ return sum;
+}
+assert.eq( 6 , db.foo.find().length() , "sharded query 1" );
+assert.eq( 3 , sumQuery( db.foo.find() ) , "sharded query 2" );
+
+placeCheck( 5 );
+
+// sort by num
+
+assert.eq( 3 , sumQuery( db.foo.find().sort( { num : 1 } ) ) , "sharding query w/sort 1" );
+assert.eq( 3 , sumQuery( db.foo.find().sort( { num : -1 } ) ) , "sharding query w/sort 2" );
+
+assert.eq( "funny man" , db.foo.find().sort( { num : 1 } )[0].name , "sharding query w/sort 3 order wrong" );
+assert.eq( -2 , db.foo.find().sort( { num : 1 } )[0].num , "sharding query w/sort 4 order wrong" );
+
+assert.eq( "bob" , db.foo.find().sort( { num : -1 } )[0].name , "sharding query w/sort 5 order wrong" );
+assert.eq( 3 , db.foo.find().sort( { num : -1 } )[0].num , "sharding query w/sort 6 order wrong" );
+
+placeCheck( 6 );
+// sort by name
+
+function getNames( c ){
+ return c.toArray().map( function(z){ return z.name; } );
+}
+correct = getNames( db.foo.find() ).sort();
+assert.eq( correct , getNames( db.foo.find().sort( { name : 1 } ) ) );
+correct = correct.reverse();
+assert.eq( correct , getNames( db.foo.find().sort( { name : -1 } ) ) );
+
+assert.eq( 3 , sumQuery( db.foo.find().sort( { name : 1 } ) ) , "sharding query w/non-shard sort 1" );
+assert.eq( 3 , sumQuery( db.foo.find().sort( { name : -1 } ) ) , "sharding query w/non-shard sort 2" );
+
+
+// sort by num multiple shards per server
+s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+assert.eq( "funny man" , db.foo.find().sort( { num : 1 } )[0].name , "sharding query w/sort and another split 1 order wrong" );
+assert.eq( "bob" , db.foo.find().sort( { num : -1 } )[0].name , "sharding query w/sort and another split 2 order wrong" );
+assert.eq( "funny man" , db.foo.find( { num : { $lt : 100 } } ).sort( { num : 1 } ).arrayAccess(0).name , "sharding query w/sort and another split 3 order wrong" );
+
+placeCheck( 7 );
+
+// getMore
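+// a negative limit asks the server for a single batch of that many docs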
+assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
+function countCursor( c ){
+ var num = 0;
+ while ( c.hasNext() ){
+ c.next();
+ num++;
+ }
+ return num;
+}
+assert.eq( 6 , countCursor( db.foo.find()._exec() ) , "getMore 2" );
+assert.eq( 6 , countCursor( db.foo.find().limit(1)._exec() ) , "getMore 3" );
+
+// find by non-shard-key
+db.foo.find().forEach(
+ function(z){
+ var y = db.foo.findOne( { _id : z._id } );
+ assert( y , "_id check 1 : " + tojson( z ) );
+ assert.eq( z.num , y.num , "_id check 2 : " + tojson( z ) );
+ }
+);
+
+// update
+person = db.foo.findOne( { num : 3 } );
+assert.eq( "bob" , person.name , "update setup 1" );
+person.name = "bob is gone";
+db.foo.update( { num : 3 } , person );
+person = db.foo.findOne( { num : 3 } );
+assert.eq( "bob is gone" , person.name , "update test B" );
+
+// remove
+assert( db.foo.findOne( { num : 3 } ) != null , "remove test A" );
+db.foo.remove( { num : 3 } );
+assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test B" );
+
+db.foo.save( { num : 3 , name : "eliot2" } );
+person = db.foo.findOne( { num : 3 } );
+assert( person , "remove test C" );
+assert.eq( person.name , "eliot2" );
+
+db.foo.remove( { _id : person._id } );
+assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test E" );
+
+placeCheck( 8 );
+
+// TODO: getLastError
+db.getLastError();
+db.getPrevError();
+
+// ---- move all to the secondary
+
+assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+
+secondary.foo.insert( { num : -3 } );
+
+s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : secondary.getMongo().name } );
+assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
+
+s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : primary.getMongo().name } );
+assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards again" );
+assert.eq( 3 , s.config.chunks.count() , "only 3 chunks" );
+
+s.stop();
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
new file mode 100644
index 0000000..8c5b184
--- /dev/null
+++ b/jstests/sharding/shard3.js
@@ -0,0 +1,130 @@
+// shard3.js
+
+s = new ShardingTest( "shard3" , 2 , 50 , 2 );
+
+s2 = s._mongos[1];
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+a = s.getDB( "test" ).foo;
+b = s2.getDB( "test" ).foo;
+
+primary = s.getServer( "test" ).getDB( "test" ).foo;
+secondary = s.getOther( s.getServer( "test" ) ).getDB( "test" ).foo;
+
+a.save( { num : 1 } );
+a.save( { num : 2 } );
+a.save( { num : 3 } );
+
+assert.eq( 3 , a.find().toArray().length , "normal A" );
+assert.eq( 3 , b.find().toArray().length , "other A" );
+
+assert.eq( 3 , primary.count() , "p1" )
+assert.eq( 0 , secondary.count() , "s1" )
+
+assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+assert( primary.find().toArray().length > 0 , "blah 1" );
+assert( secondary.find().toArray().length > 0 , "blah 2" );
+assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3" )
+
+assert.eq( 3 , a.find().toArray().length , "normal B" );
+assert.eq( 3 , b.find().toArray().length , "other B" );
+
+// --- filtering ---
+
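+// docs written directly to a shard (bypassing mongos) that don't belong to
+// any of its chunks should be filtered out of results that go through mongos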
+function doCounts( name , total ){
+ total = total || ( primary.count() + secondary.count() );
+ assert.eq( total , a.count() , name + " count" );
+ assert.eq( total , a.find().sort( { n : 1 } ).itcount() , name + " itcount - sort n" );
+ assert.eq( total , a.find().itcount() , name + " itcount" );
+ assert.eq( total , a.find().sort( { _id : 1 } ).itcount() , name + " itcount - sort _id" );
+ return total;
+}
+
+var total = doCounts( "before wrong save" )
+secondary.save( { num : -3 } );
+doCounts( "after wrong save" , total )
+
+// --- move all to 1 ---
+print( "MOVE ALL TO 1" );
+
+assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+s.printCollectionInfo( "test.foo" );
+
+assert( a.findOne( { num : 1 } ) )
+assert( b.findOne( { num : 1 } ) )
+
+print( "GOING TO MOVE" );
+s.printCollectionInfo( "test.foo" );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.printCollectionInfo( "test.foo" );
+assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shard again" );
+assert( a.findOne( { num : 1 } ) )
+assert( b.findOne( { num : 1 } ) )
+
+print( "*** drop" );
+
+s.printCollectionInfo( "test.foo" , "before drop" );
+a.drop();
+s.printCollectionInfo( "test.foo" , "after drop" );
+
+assert.eq( 0 , a.count() , "a count after drop" )
+assert.eq( 0 , b.count() , "b count after drop" )
+
+s.printCollectionInfo( "test.foo" , "after counts" );
+
+assert.eq( 0 , primary.count() , "p count after drop" )
+assert.eq( 0 , secondary.count() , "s count after drop" )
+
+primary.save( { num : 1 } );
+secondary.save( { num : 4 } );
+
+assert.eq( 1 , primary.count() , "p count after drop and save" )
+assert.eq( 1 , secondary.count() , "s count after drop and save" )
+
+
+print("*** makes sure that sharding knows where things live" );
+
+assert.eq( 1 , a.count() , "a count after drop and save" )
+s.printCollectionInfo( "test.foo" , "after a count" );
+assert.eq( 1 , b.count() , "b count after drop and save" )
+s.printCollectionInfo( "test.foo" , "after b count" );
+
+assert( a.findOne( { num : 1 } ) , "a drop1" );
+assert.isnull( a.findOne( { num : 4 } ) , "a drop2" );
+
+s.printCollectionInfo( "test.foo" , "after a findOne tests" );
+
+assert( b.findOne( { num : 1 } ) , "b drop1" );
+assert.isnull( b.findOne( { num : 4 } ) , "b drop2" );
+
+s.printCollectionInfo( "test.foo" , "after b findOne tests" );
+
+print( "*** dropDatabase setup" )
+
+s.printShardingStatus()
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+a.save( { num : 2 } );
+a.save( { num : 3 } );
+s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.printShardingStatus();
+
+s.printCollectionInfo( "test.foo" , "after dropDatabase setup" );
+doCounts( "after dropDatabase setup2" )
+s.printCollectionInfo( "test.foo" , "after dropDatabase setup3" );
+
+print( "*** ready to call dropDatabase" )
+res = s.getDB( "test" ).dropDatabase();
+assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
+
+s.printShardingStatus();
+s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
+assert.eq( 0 , doCounts( "after dropDatabase called" ) )
+
+s.stop();
diff --git a/jstests/sharding/shard4.js b/jstests/sharding/shard4.js
new file mode 100644
index 0000000..2d7a0df
--- /dev/null
+++ b/jstests/sharding/shard4.js
@@ -0,0 +1,49 @@
+// shard4.js
+
+s = new ShardingTest( "shard4" , 2 , 50 , 2 );
+
+s2 = s._mongos[1];
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s.getDB( "test" ).foo.save( { num : 1 } );
+s.getDB( "test" ).foo.save( { num : 2 } );
+s.getDB( "test" ).foo.save( { num : 3 } );
+s.getDB( "test" ).foo.save( { num : 4 } );
+s.getDB( "test" ).foo.save( { num : 5 } );
+s.getDB( "test" ).foo.save( { num : 6 } );
+s.getDB( "test" ).foo.save( { num : 7 } );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
+assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
+assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
+ s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+//s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.printChunks();
+
+print( "* A" );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 2" );
+print( "* B" );
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 3" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 4" );
+
+for ( var i=0; i<10; i++ ){
+ print( "* C " + i );
+ assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B " + i );
+}
+
+s.stop();
diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js
new file mode 100644
index 0000000..050a7d7
--- /dev/null
+++ b/jstests/sharding/shard5.js
@@ -0,0 +1,52 @@
+// shard5.js
+
+// tests write passthrough
+
+s = new ShardingTest( "shard5" , 2 , 50 , 2 );
+
+s2 = s._mongos[1];
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s.getDB( "test" ).foo.save( { num : 1 } );
+s.getDB( "test" ).foo.save( { num : 2 } );
+s.getDB( "test" ).foo.save( { num : 3 } );
+s.getDB( "test" ).foo.save( { num : 4 } );
+s.getDB( "test" ).foo.save( { num : 5 } );
+s.getDB( "test" ).foo.save( { num : 6 } );
+s.getDB( "test" ).foo.save( { num : 7 } );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
+assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
+assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
+ s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
+assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
+
+s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+//s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.printChunks()
+
+print( "* A" );
+
+assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
+
+s2.getDB( "test" ).foo.save( { num : 2 } );
+
+assert.soon(
+ function(){
+ return 8 == s2.getDB( "test" ).foo.find().toArray().length;
+ } , "other B 2" , 5000 , 100 )
+
+assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+
+
+s.stop();
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
new file mode 100644
index 0000000..e15d74c
--- /dev/null
+++ b/jstests/sharding/shard6.js
@@ -0,0 +1,39 @@
+// shard6.js
+
+s = new ShardingTest( "shard6" , 2 , 0 , 1 );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+
+db = s.getDB( "test" );
+
+// we want a lot of data, so let's make a 50k string to cheat :)
+bigString = "";
+while ( bigString.length < 50000 )
+ bigString += "this is a big string. ";
+
+// ok, now let's insert some data
+var num = 0;
+for ( ; num<100; num++ ){
+ db.data.save( { num : num , bigString : bigString } );
+}
+
+assert.eq( 100 , db.data.find().toArray().length );
+
+// limit
+
+assert.eq( 77 , db.data.find().limit(77).itcount() , "limit test 1" );
+assert.eq( 1 , db.data.find().limit(1).itcount() , "limit test 2" );
+for ( var i=1; i<10; i++ ){
+ assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3 : " + i );
+}
+
+
+// --- test save support ---
+
+o = db.data.findOne();
+o.x = 16;
+db.data.save( o );
+assert.eq( 16 , db.data.findOne( { _id : o._id } ).x , "x1 - did save fail?" );
+
+s.stop();
diff --git a/jstests/sharding/splitpick.js b/jstests/sharding/splitpick.js
new file mode 100644
index 0000000..ad27645
--- /dev/null
+++ b/jstests/sharding/splitpick.js
@@ -0,0 +1,33 @@
+// splitpick.js
+
+/**
+* tests picking the middle to split on
+*/
+
+s = new ShardingTest( "splitpick" , 2 );
+
+db = s.getDB( "test" );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { a : 1 } } );
+
+c = db.foo;
+
+for ( var i=1; i<20; i++ ){
+ c.save( { a : i } );
+}
+c.save( { a : 99 } );
+
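+// splitvalue appears to be a dry run: it reports the split point mongos would
+// pick for the chunk containing the query doc, without actually splitting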
+assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 1 } } ).middle.a , 1 , "splitvalue 1" );
+assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 1 , "splitvalue 2" );
+
+s.adminCommand( { split : "test.foo" , find : { a : 1 } } );
+assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 99 , "splitvalue 3" );
+s.adminCommand( { split : "test.foo" , find : { a : 99 } } );
+
+assert.eq( s.config.chunks.count() , 3 );
+s.printChunks();
+
+assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 50 } } ).middle.a , 10 , "splitvalue 4 " );
+
+s.stop();
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js
new file mode 100644
index 0000000..82c3d8a
--- /dev/null
+++ b/jstests/sharding/update1.js
@@ -0,0 +1,33 @@
+s = new ShardingTest( "auto1" , 2 , 1 , 1 );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.update1" , key : { key : 1 } } );
+
+db = s.getDB( "test" )
+coll = db.update1;
+
+coll.insert({_id:1, key:1});
+
+// these are upserts
+coll.save({_id:2, key:2});
+coll.save({_id:3, key:3});
+
+assert.eq(coll.count(), 3, "count A")
+
+// update existing using save()
+coll.save({_id:1, key:1, other:1});
+
+// update existing using update()
+coll.update({_id:2}, {key:2, other:2});
+//coll.update({_id:3, key:3}, {other:3}); //should add key to new object (doesn't work yet)
+coll.update({_id:3}, {key:3, other:3});
+
+assert.eq(coll.count(), 3, "count B")
+coll.find().forEach(function(x){
+ assert.eq(x._id, x.key, "_id == key");
+ assert.eq(x._id, x.other, "_id == other");
+});
+
+
+s.stop()
+
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
new file mode 100644
index 0000000..0516aff
--- /dev/null
+++ b/jstests/sharding/version1.js
@@ -0,0 +1,23 @@
+// version1.js
+
+s = new ShardingTest( "version1" , 1 , 2 )
+
+a = s._connections[0].getDB( "admin" );
+
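+// setShardVersion rules exercised below: a numeric version must be supplied,
+// the first version set on a shard has to be authoritative, and versions can
+// only move forward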
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : "a" } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , authoritative : true } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 , "should have failed b/c no auth" );
+
+assert.commandWorked( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ) , "should have worked" );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : "a" , version : 2 } ).ok == 0 );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 1 } ).ok == 0 );
+
+assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+
+assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
+assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
+
+s.stop();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
new file mode 100644
index 0000000..9683c92
--- /dev/null
+++ b/jstests/sharding/version2.js
@@ -0,0 +1,36 @@
+// version2.js
+
+s = new ShardingTest( "version2" , 1 , 2 )
+
+a = s._connections[0].getDB( "admin" );
+
+// setup from one client
+
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i == 0 );
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i == 0 );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ).ok == 1 );
+
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i == 2 );
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i == 2 );
+
+// from another client
+
+a2 = connect( s._connections[0].name + "/admin" );
+
+assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i , 2 , "a2 global 1" );
+assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i , 0 , "a2 mine 1" );
+
+function simpleFindOne(){
+ return a2.getMongo().getDB( "alleyinsider" ).foo.findOne();
+}
+
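+// shard versions are tracked per connection: this client hasn't declared a
+// version for alleyinsider.foo yet, so sharded reads through it should fail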
+assert.commandWorked( a2.runCommand( { "setShardVersion" : "alleyinsider.bar" , configdb : s._configDB , version : 2 , authoritative : true } ) , "setShardVersion bar temp");
+assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1" );
+assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1");
+simpleFindOne(); // now should run ok
+assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2");
+simpleFindOne(); // newer version is ok
+
+
+s.stop();