author     Antonin Kral <a.kral@bobek.cz>  2011-03-17 00:05:43 +0100
committer  Antonin Kral <a.kral@bobek.cz>  2011-03-17 00:05:43 +0100
commit     582fc32574a3b158c81e49cb00e6ae59205e66ba (patch)
tree       ac64a3243e0d2121709f685695247052858115c8 /jstests/sharding
parent     2761bffa96595ac1698d86bbc2e95ebb0d4d6e93 (diff)
download   mongodb-582fc32574a3b158c81e49cb00e6ae59205e66ba.tar.gz
Imported Upstream version 1.8.0
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/addshard3.js                     |  9
-rw-r--r--  jstests/sharding/addshard4.js                     | 24
-rw-r--r--  jstests/sharding/auto1.js                         |  5
-rw-r--r--  jstests/sharding/bigMapReduce.js                  | 62
-rw-r--r--  jstests/sharding/count1.js                        | 10
-rw-r--r--  jstests/sharding/cursor1.js                       |  2
-rw-r--r--  jstests/sharding/features1.js                     | 24
-rw-r--r--  jstests/sharding/features2.js                     | 29
-rw-r--r--  jstests/sharding/features3.js                     |  3
-rw-r--r--  jstests/sharding/geo_near_random1.js              | 37
-rw-r--r--  jstests/sharding/geo_near_random2.js              | 44
-rw-r--r--  jstests/sharding/key_many.js                      |  6
-rw-r--r--  jstests/sharding/key_string.js                    | 13
-rw-r--r--  jstests/sharding/limit_push.js                    | 47
-rw-r--r--  jstests/sharding/migrateBig.js                    | 45
-rw-r--r--  jstests/sharding/multi_mongos1.js                 | 70
-rw-r--r--  jstests/sharding/rename.js                        |  1
-rw-r--r--  jstests/sharding/shard1.js                        |  1
-rw-r--r--  jstests/sharding/shard3.js                        | 36
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js  | 89
-rw-r--r--  jstests/sharding/sort1.js                         | 46
-rw-r--r--  jstests/sharding/splitpick.js                     | 39
-rw-r--r--  jstests/sharding/sync1.js                         | 15
-rw-r--r--  jstests/sharding/update1.js                       |  7
-rw-r--r--  jstests/sharding/version1.js                      | 40
-rw-r--r--  jstests/sharding/version2.js                      | 35
26 files changed, 617 insertions, 122 deletions
diff --git a/jstests/sharding/addshard3.js b/jstests/sharding/addshard3.js
new file mode 100644
index 0000000..aa5a21e
--- /dev/null
+++ b/jstests/sharding/addshard3.js
@@ -0,0 +1,9 @@
+
+s = new ShardingTest( "add_shard3", 1 );
+
+var result = s.admin.runCommand({"addshard" : "localhost:31000"});
+
+printjson(result);
+
+assert.eq(result.ok, 0, "don't add mongos as a shard");
+
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
new file mode 100644
index 0000000..be4a8b3
--- /dev/null
+++ b/jstests/sharding/addshard4.js
@@ -0,0 +1,24 @@
+// a replica set's passive nodes should be okay to add as part of a shard config
+
+s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
+
+r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 34000});
+r.startSet();
+
+var config = r.getReplSetConfig();
+config.members[2].priority = 0;
+
+r.initiate(config);
+
+var master = r.getMaster().master;
+
+var members = config.members.map(function(elem) { return elem.host; });
+var shardName = "addshard4/"+members.join(",");
+
+print("adding shard "+shardName);
+
+var result = s.adminCommand({"addshard" : shardName});
+
+printjson(result);
+
+
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 346c43a..bdd43e9 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -32,15 +32,19 @@ print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand(
for ( ; i<200; i++ ){
coll.save( { num : i , s : bigString } );
}
+db.getLastError();
s.printChunks()
+s.printChangeLog()
counts.push( s.config.chunks.count() );
for ( ; i<400; i++ ){
coll.save( { num : i , s : bigString } );
}
+db.getLastError();
s.printChunks();
+s.printChangeLog()
counts.push( s.config.chunks.count() );
for ( ; i<700; i++ ){
@@ -49,6 +53,7 @@ for ( ; i<700; i++ ){
db.getLastError();
s.printChunks();
+s.printChangeLog()
counts.push( s.config.chunks.count() );
assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) )
diff --git a/jstests/sharding/bigMapReduce.js b/jstests/sharding/bigMapReduce.js
index 1cc12f4..3cc1d66 100644
--- a/jstests/sharding/bigMapReduce.js
+++ b/jstests/sharding/bigMapReduce.js
@@ -7,11 +7,69 @@ db = s.getDB( "test" );
var str=""
for (i=0;i<4*1024;i++) { str=str+"a"; }
for (j=0; j<50; j++) for (i=0; i<512; i++){ db.foo.save({y:str})}
+db.getLastError();
+
+s.printChunks();
+s.printChangeLog();
function map() { emit('count', 1); }
function reduce(key, values) { return Array.sum(values) }
-out = db.foo.mapReduce(map, reduce)
-printjson(out) // SERVER-1400
+gotAGoodOne = false;
+
+for ( iter=0; iter<5; iter++ ){
+ try {
+ out = db.foo.mapReduce(map, reduce,"big_out")
+ gotAGoodOne = true
+ }
+ catch ( e ){
+ if ( __mrerror__ && __mrerror__.cause && __mrerror__.cause.assertionCode == 13388 ){
+ // TODO: SERVER-2396
+ sleep( 1000 );
+ continue;
+ }
+ printjson( __mrerror__ );
+ throw e;
+ }
+}
+assert( gotAGoodOne , "no good for basic" )
+
+gotAGoodOne = false;
+// test output to a different DB
+// do it multiple times so that primary shard changes
+for (iter = 0; iter < 5; iter++) {
+ outCollStr = "mr_replace_col_" + iter;
+ outDbStr = "mr_db_" + iter;
+
+ print("Testing mr replace into DB " + iter)
+
+ try {
+ res = db.foo.mapReduce( map , reduce , { out : { replace: outCollStr, db: outDbStr } } )
+ gotAGoodOne = true;
+ }
+ catch ( e ){
+ if ( __mrerror__ && __mrerror__.cause && __mrerror__.cause.assertionCode == 13388 ){
+ // TODO: SERVER-2396
+ sleep( 1000 );
+ continue;
+ }
+ printjson( __mrerror__ );
+ throw e;
+ }
+ printjson(res);
+
+ outDb = s.getDB(outDbStr);
+ outColl = outDb[outCollStr];
+
+ obj = outColl.convertToSingleObject("value");
+ assert.eq( 25600 , obj.count , "Received wrong result " + obj.count );
+
+ print("checking result field");
+ assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
+ assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
+}
+
+assert( gotAGoodOne , "no good for out db" )
s.stop()
+
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index ed69d1f..cc3f712 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -27,14 +27,16 @@ db.foo.save( { _id : 6 , name : "allan" } )
assert.eq( 6 , db.foo.find().count() , "basic count" );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [Minkey -> allan) , * [allan -> ..)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // * [allan -> sara) , [sara -> Maxkey)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [alan -> joe) , [joe -> sara]
+
+s.printChunks()
assert.eq( 6 , db.foo.find().count() , "basic count after split " );
assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
-s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : secondary.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "allan" } , to : secondary.getMongo().name } );
assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 2a30936..f6cb9e4 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -53,7 +53,7 @@ sleep( 6000 )
assert( cur.next() , "T3" )
assert( cur.next() , "T4" );
sleep( 22000 )
-assert.throws( function(){ cur.next(); } , "T5" )
+assert.throws( function(){ cur.next(); } , null , "T5" )
after = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
gc(); gc()
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 05b8b8c..c22f094 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -81,10 +81,10 @@ assert.eq( 1 , db.foo3.count() , "eval pre1" );
assert.eq( 1 , db.foo2.count() , "eval pre2" );
assert.eq( 8 , db.eval( function(){ return db.foo3.findOne().a; } ), "eval 1 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ) } , "eval 2" )
+assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ) } , null , "eval 2" )
assert.eq( 1 , db.eval( function(){ return db.foo3.count(); } ), "eval 3 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ) } , "eval 4" )
+assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ) } , null , "eval 4" )
// ---- unique shard key ----
@@ -105,6 +105,14 @@ assert.eq( 2 , b.foo4.getIndexes().length , "ub2" );
assert( a.foo4.getIndexes()[1].unique , "ua3" );
assert( b.foo4.getIndexes()[1].unique , "ub3" );
+assert.eq( 2 , db.foo4.count() , "uc1" )
+db.foo4.save( { num : 7 } )
+assert.eq( 3 , db.foo4.count() , "uc2" )
+db.foo4.save( { num : 7 } )
+gle = db.getLastErrorObj();
+assert( gle.err , "uc3" )
+assert.eq( 3 , db.foo4.count() , "uc4" )
+
// --- don't let you convertToCapped ----
assert( ! db.foo4.isCapped() , "ca1" );
assert( ! a.foo4.isCapped() , "ca2" );
@@ -152,12 +160,22 @@ assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count
// ---- can't shard non-empty collection without index -----
db.foo8.save( { a : 1 } );
+db.getLastError();
assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
+
+// ---- can't shard non-empty collection with null values in shard key ----
+
+db.foo9.save( { b : 1 } );
+db.getLastError();
+db.foo9.ensureIndex( { a : 1 } );
+assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 } } ).ok , "entry with null value" );
+
+
// --- listDatabases ---
r = db.getMongo().getDBs()
-assert.eq( 4 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
+assert.eq( 3 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
assert.lt( 10000 , r.totalSize , "listDatabases 2 : " + tojson( r ) );
s.stop()
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index dfb2883..b2070ea 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -92,8 +92,10 @@ r = function( key , values ){
doMR = function( n ){
print(n);
-
- var res = db.mr.mapReduce( m , r );
+
+ // on-disk
+
+ var res = db.mr.mapReduce( m , r , "smr1_out" );
printjson( res );
assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n );
@@ -103,11 +105,26 @@ doMR = function( n ){
var z = {};
x.find().forEach( function(a){ z[a._id] = a.value.count; } );
assert.eq( 3 , Object.keySet( z ).length , "MR T2 " + n );
- assert.eq( 2 , z.a , "MR T2 " + n );
- assert.eq( 3 , z.b , "MR T2 " + n );
- assert.eq( 3 , z.c , "MR T2 " + n );
+ assert.eq( 2 , z.a , "MR T3 " + n );
+ assert.eq( 3 , z.b , "MR T4 " + n );
+ assert.eq( 3 , z.c , "MR T5 " + n );
x.drop();
+
+ // inline
+
+ var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
+ printjson( res );
+ assert.eq( new NumberLong(4) , res.counts.input , "MR T6 " + n );
+
+ var z = {};
+ res.find().forEach( function(a){ z[a._id] = a.value.count; } );
+ printjson( z );
+ assert.eq( 3 , Object.keySet( z ).length , "MR T7 " + n ) ;
+ assert.eq( 2 , z.a , "MR T8 " + n );
+ assert.eq( 3 , z.b , "MR T9 " + n );
+ assert.eq( 3 , z.c , "MR TA " + n );
+
}
doMR( "before" );
@@ -124,7 +141,7 @@ s.adminCommand({movechunk:'test.mr', find:{x:3}, to: s.getServer('test').name }
doMR( "after extra split" );
-cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " };
+cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " , out : "broken1" };
x = db.runCommand( cmd );
y = s._connections[0].getDB( "test" ).runCommand( cmd );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index b15ccd3..b28d88e 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -1,4 +1,3 @@
-
s = new ShardingTest( "features3" , 2 , 1 , 1 );
s.adminCommand( { enablesharding : "test" } );
@@ -25,7 +24,7 @@ assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
start = new Date()
print( "about to fork shell: " + Date() )
-join = startParallelShell( "db.foo.find( function(){ x = \"\"; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
+join = startParallelShell( "db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
print( "after forking shell: " + Date() )
function getMine( printInprog ){
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
new file mode 100644
index 0000000..6ffd4b2
--- /dev/null
+++ b/jstests/sharding/geo_near_random1.js
@@ -0,0 +1,37 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var testName = "geo_near_random1";
+var s = new ShardingTest( testName , 3 );
+
+db = s.getDB("test"); // global db
+
+var test = new GeoNearRandomTest(testName);
+
+s.adminCommand({enablesharding:'test'});
+s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
+
+test.insertPts(50);
+
+for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
+ try {
+ s.adminCommand({moveChunk: ('test.' + testName), find: {_id: i-1}, to: ('shard000' + (i%3))});
+ } catch (e) {
+ // ignore this error
+ if (! e.match(/that chunk is already on that shard/)){
+ throw e;
+ }
+ }
+}
+
+printShardingSizes()
+
+var opts = {sharded: true}
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+s.stop()
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
new file mode 100644
index 0000000..4871e1e
--- /dev/null
+++ b/jstests/sharding/geo_near_random2.js
@@ -0,0 +1,44 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var testName = "geo_near_random2";
+var s = new ShardingTest( testName , 3 );
+
+db = s.getDB("test"); // global db
+
+var test = new GeoNearRandomTest(testName);
+
+s.adminCommand({enablesharding:'test'});
+s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
+
+test.insertPts(5000);
+
+for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
+ try {
+ s.adminCommand({moveChunk: ('test.' + testName), find: {_id: i-1}, to: ('shard000' + (i%3))});
+ } catch (e) {
+ // ignore this error
+ if (! e.match(/that chunk is already on that shard/)){
+ throw e;
+ }
+ }
+}
+
+printShardingSizes()
+
+opts = {sphere:0, nToTest:test.nPts*0.01, sharded:true};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
+s.stop()
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 1e0ba9d..3a8203f 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -20,7 +20,7 @@ s = new ShardingTest( "key_many" , 2 );
s.adminCommand( { enablesharding : "test" } )
db = s.getDB( "test" );
primary = s.getServer( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
function makeObjectDotted( v ){
var o = {};
@@ -97,12 +97,12 @@ for ( var i=0; i<types.length; i++ ){
s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
- s.adminCommand( { movechunk : longName , find : makeObjectDotted( curT.values[3] ) , to : seconday.getMongo().name } );
+ s.adminCommand( { movechunk : longName , find : makeObjectDotted( curT.values[0] ) , to : secondary.getMongo().name } );
s.printChunks();
assert.eq( 3 , primary[shortName].find().toArray().length , curT.name + " primary count" );
- assert.eq( 3 , seconday[shortName].find().toArray().length , curT.name + " secondary count" );
+ assert.eq( 3 , secondary[shortName].find().toArray().length , curT.name + " secondary count" );
assert.eq( 6 , c.find().toArray().length , curT.name + " total count" );
assert.eq( 6 , c.find().sort( makeObjectDotted( 1 ) ).toArray().length , curT.name + " total count sorted" );
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 8ee1c70..bbc5dfb 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -20,11 +20,11 @@ db.foo.save( { name : "allan" } )
assert.eq( 6 , db.foo.find().count() , "basic count" );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [Minkey -> allan) , * [allan -> ..)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // * [allan -> sara) , [sara -> Maxkey)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [alan -> joe) , [joe -> sara]
-s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "allan" } , to : seconday.getMongo().name } );
s.printChunks();
@@ -39,6 +39,11 @@ assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with co
assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
+// make sure we can't foce a split on an extreme key
+// [allan->joe)
+assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "allan" } } ) } );
+assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "joe" } } ) } );
+
s.stop();
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
new file mode 100644
index 0000000..75ad271
--- /dev/null
+++ b/jstests/sharding/limit_push.js
@@ -0,0 +1,47 @@
+// This test is to ensure that limit() clauses are pushed down to the shards and evaluated
+// See: http://jira.mongodb.org/browse/SERVER-1896
+
+s = new ShardingTest( "limit_push", 2, 1, 1 );
+
+db = s.getDB( "test" );
+
+// Create some data
+for (i=0; i < 100; i++) { db.limit_push.insert({ _id : i, x: i}); }
+db.limit_push.ensureIndex( { x : 1 } );
+assert.eq( 100 , db.limit_push.find().length() , "Incorrect number of documents" );
+
+// Shard the collection
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.limit_push" , key : { x : 1 } } );
+
+// Now split the and move the data between the shards
+s.adminCommand( { split : "test.limit_push", middle : { x : 50 }} );
+s.adminCommand( { moveChunk: "test.limit_push", find : { x : 51}, to : "shard0000" })
+
+// Check that the chunck have split correctly
+assert.eq( 2 , s.config.chunks.count() , "wrong number of chunks");
+
+// The query is asking for the maximum value below a given value
+// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
+q = { x : { $lt : 60} };
+
+// Make sure the basic queries are correct
+assert.eq( 60 , db.limit_push.find( q ).count() , "Did not find 60 documents" );
+//rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
+//assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
+
+// Now make sure that the explain shos that each shard is returning a single document as indicated
+// by the "n" element for each shard
+exp = db.limit_push.find( q ).sort( { x:-1} ).limit(1).explain();
+printjson( exp )
+
+assert.eq("ParallelSort", exp.clusteredType, "Not a ParallelSort");
+
+var k = 0;
+for (var j in exp.shards) {
+ assert.eq( 1 , exp.shards[j][0].n, "'n' is not 1 from shard000" + k.toString());
+ k++
+}
+
+s.stop();
+
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
new file mode 100644
index 0000000..f6ba18a
--- /dev/null
+++ b/jstests/sharding/migrateBig.js
@@ -0,0 +1,45 @@
+
+s = new ShardingTest( "migrateBig" , 2 , 0 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+
+db = s.getDB( "test" )
+coll = db.foo
+
+big = ""
+while ( big.length < 10000 )
+ big += "eliot"
+
+for ( x=0; x<100; x++ )
+ coll.insert( { x : x , big : big } )
+
+s.adminCommand( { split : "test.foo" , middle : { x : 33 } } )
+s.adminCommand( { split : "test.foo" , middle : { x : 66 } } )
+s.adminCommand( { movechunk : "test.foo" , find : { x : 90 } , to : s.getOther( s.getServer( "test" ) ).name } )
+
+db.printShardingStatus()
+
+print( "YO : " + s.getServer( "test" ).host )
+direct = new Mongo( s.getServer( "test" ).host )
+print( "direct : " + direct )
+
+directDB = direct.getDB( "test" )
+
+for ( done=0; done<2*1024*1024; done+=big.length ){
+ directDB.foo.insert( { x : 50 + Math.random() , big : big } )
+ directDB.getLastError();
+}
+
+db.printShardingStatus()
+
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { x : 50 } , to : s.getOther( s.getServer( "test" ) ).name } ); } , [] , "move should fail" )
+
+for ( i=0; i<20; i+= 2 )
+ s.adminCommand( { split : "test.foo" , middle : { x : i } } )
+
+db.printShardingStatus()
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 120 * 1000 , 2000 )
+
+s.stop()
diff --git a/jstests/sharding/multi_mongos1.js b/jstests/sharding/multi_mongos1.js
new file mode 100644
index 0000000..cf9ebde
--- /dev/null
+++ b/jstests/sharding/multi_mongos1.js
@@ -0,0 +1,70 @@
+// multi_mongos.js
+
+// setup sharding with two mongos, s1 and s2
+s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding : "test" } );
+s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s1.config.databases.find().forEach( printjson )
+
+viaS1 = s1.getDB( "test" ).foo;
+viaS2 = s2.getDB( "test" ).foo;
+
+primary = s1.getServer( "test" ).getDB( "test" ).foo;
+secondary = s1.getOther( primary.name ).getDB( "test" ).foo;
+
+N = 4;
+for (i=1; i<=N; i++) {
+ viaS1.save( { num : i } );
+}
+
+// initial checks
+
+// both mongos see all elements
+assert.eq( N , viaS1.find().toArray().length , "normal A" );
+assert.eq( N , viaS2.find().toArray().length , "other A" );
+
+// all elements are in one of the shards
+assert.eq( N , primary.count() , "p1" )
+assert.eq( 0 , secondary.count() , "s1" )
+assert.eq( 1 , s1.onNumShards( "foo" ) , "on 1 shards" );
+
+//
+// STEP 1 (builds a bit of context so there should probably not be a step 2 in this same test)
+// where we try to issue a move chunk from a mongos that's stale
+// followed by a split on a valid chunk, albeit one with not the highest lastmod
+
+// split in [Minkey->1), [1->N), [N,Maxkey)
+s1.adminCommand( { split : "test.foo" , middle : { num : 1 } } );
+s1.adminCommand( { split : "test.foo" , middle : { num : N } } );
+
+// s2 is now stale w.r.t boundaires around { num: 1 }
+res = s2.getDB( "admin" ).runCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq( 0 , res.ok , "a move with stale boundaries should not have succeeded" + tojson(res) );
+
+// s2 must have reloaded as a result of a failed move; retrying should work
+res = s2.getDB( "admin" ).runCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq( 1 , res.ok , "mongos did not reload after a failed migrate" + tojson(res) );
+
+// s1 is not stale about the boundaries of [MinKey->1)
+// but we'll try to split a chunk whose lastmod.major was not touched by the previous move
+// in 1.6, that chunk would be with [Minkey->1) (where { num: -1 } falls)
+// after 1.6, it would be with [N->Maxkey] (where { num: N+1 } falls)
+// s.printShardingStatus()
+res = s1.getDB( "admin" ).runCommand( { split : "test.foo" , middle : { num : N+1 } } ); // replace with { num: -1 } instead in 1.6
+assert.eq( 1, res.ok , "split over accurate boudaries should have succeeded" + tojson(res) );
+
+// { num : 4 } is on primary
+// { num : 1 , 2 , 3 } are on secondary
+assert.eq( 1 , primary.find().toArray().length , "wrong count on primary" );
+assert.eq( 3 , secondary.find().toArray().length , "wrong count on secondary" );
+assert.eq( N , primary.find().itcount() + secondary.find().itcount() , "wrong total count" )
+
+assert.eq( N , viaS1.find().toArray().length , "normal B" );
+assert.eq( N , viaS2.find().toArray().length , "other B" );
+
+printjson( primary._db._adminCommand( "shardingState" ) );
+
+s1.stop();
\ No newline at end of file
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index aa6137d..fa27611 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -24,3 +24,4 @@ assert.eq(db.bar.findOne(), {_id:3}, '3.1');
assert.eq(db.bar.count(), 1, '3.2');
assert.eq(db.foo.count(), 0, '3.3');
+s.stop()
\ No newline at end of file
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index 1783238..ae382e4 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -21,6 +21,7 @@ assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
s.adminCommand( shardCommand );
cconfig = s.config.collections.findOne( { _id : "test.foo" } );
+assert( cconfig , "why no collection entry for test.foo" )
delete cconfig.lastmod
delete cconfig.dropped
assert.eq( cconfig , { _id : "test.foo" , key : { num : 1 } , unique : false } , "Sharded content" );
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index e57dc1e..7132563 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -41,9 +41,10 @@ printjson( primary._db._adminCommand( "shardingState" ) );
// --- filtering ---
-function doCounts( name , total ){
+function doCounts( name , total , onlyItCounts ){
total = total || ( primary.count() + secondary.count() );
- assert.eq( total , a.count() , name + " count" );
+ if ( ! onlyItCounts )
+ assert.eq( total , a.count() , name + " count" );
assert.eq( total , a.find().sort( { n : 1 } ).itcount() , name + " itcount - sort n" );
assert.eq( total , a.find().itcount() , name + " itcount" );
assert.eq( total , a.find().sort( { _id : 1 } ).itcount() , name + " itcount - sort _id" );
@@ -51,8 +52,12 @@ function doCounts( name , total ){
}
var total = doCounts( "before wrong save" )
-//secondary.save( { num : -3 } );
-//doCounts( "after wrong save" , total )
+secondary.save( { num : -3 } );
+doCounts( "after wrong save" , total , true )
+e = a.find().explain();
+assert.eq( 3 , e.n , "ex1" )
+assert.eq( 4 , e.nscanned , "ex2" )
+assert.eq( 1 , e.nChunkSkips , "ex3" )
// --- move all to 1 ---
print( "MOVE ALL TO 1" );
@@ -89,27 +94,18 @@ s.printCollectionInfo( "test.foo" , "after counts" );
assert.eq( 0 , primary.count() , "p count after drop" )
assert.eq( 0 , secondary.count() , "s count after drop" )
+// NOTE
+// the following bypasses the sharding layer and writes straight to the servers
+// this is not supported at all but we'd like to leave this backdoor for now
primary.save( { num : 1 } );
secondary.save( { num : 4 } );
-
assert.eq( 1 , primary.count() , "p count after drop and save" )
assert.eq( 1 , secondary.count() , "s count after drop and save " )
+print("*** makes sure that sharded access respects the drop command" );
-print("*** makes sure that sharding knows where things live" );
-
-assert.eq( 1 , a.count() , "a count after drop and save" )
-s.printCollectionInfo( "test.foo" , "after a count" );
-assert.eq( 1 , b.count() , "b count after drop and save" )
-s.printCollectionInfo( "test.foo" , "after b count" );
-
-assert( a.findOne( { num : 1 } ) , "a drop1" );
-assert.isnull( a.findOne( { num : 4 } ) , "a drop1" );
-
-s.printCollectionInfo( "test.foo" , "after a findOne tests" );
-
-assert( b.findOne( { num : 1 } ) , "b drop1" );
-assert.isnull( b.findOne( { num : 4 } ) , "b drop1" );
+assert.isnull( a.findOne() , "lookup via mongos 'a' accessed dropped data" );
+assert.isnull( b.findOne() , "lookup via mongos 'b' accessed dropped data" );
s.printCollectionInfo( "test.foo" , "after b findOne tests" );
@@ -130,6 +126,8 @@ s.printCollectionInfo( "test.foo" , "after dropDatabase setup3" );
print( "*** ready to call dropDatabase" )
res = s.getDB( "test" ).dropDatabase();
assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
+// Waiting for SERVER-2253
+// assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
new file mode 100644
index 0000000..c722f21
--- /dev/null
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -0,0 +1,89 @@
+// replica set as solo shard
+// getLastError(2) fails on about every 170 inserts on my Macbook laptop -Tony
+// TODO: Add assertion code that catches hang
+
+load('jstests/libs/grid.js')
+
+function go() {
+
+ var N = 2000
+
+ // ~1KB string
+ var Text = ''
+ for (var i = 0; i < 40; i++)
+ Text += 'abcdefghijklmnopqrstuvwxyz'
+
+ // Create replica set with 3 servers
+ var repset1 = new ReplicaSet('repset1', 3) .begin()
+
+ // Add data to it
+ var conn1a = repset1.getMaster()
+ var db1a = conn1a.getDB('test')
+ for (var i = 0; i < N; i++) {
+ db1a['foo'].insert({x: i, text: Text})
+ db1a.getLastError(2) // wait to be copied to at least one secondary
+ }
+
+ // Create 3 sharding config servers
+ var configsetSpec = new ConfigSet(3)
+ var configsetConns = configsetSpec.begin()
+
+ // Create sharding router (mongos)
+ var routerSpec = new Router(configsetSpec)
+ var routerConn = routerSpec.begin()
+ var dba = routerConn.getDB('admin')
+ var db = routerConn.getDB('test')
+
+ // Add repset1 as only shard
+ addShard (routerConn, repset1.getURL())
+
+ // Enable sharding on test db and its collection foo
+ enableSharding (routerConn, 'test')
+ db['foo'].ensureIndex({x: 1})
+ shardCollection (routerConn, 'test', 'foo', {x: 1})
+
+ sleep(30000)
+ printjson (db['foo'].stats())
+ dba.printShardingStatus()
+ printjson (db['foo'].count())
+
+ // Test case where GLE should return an error
+ db.foo.insert({_id:'a', x:1});
+ db.foo.insert({_id:'a', x:1});
+ var x = db.getLastErrorObj(2, 30000)
+ assert.neq(x.err, null, tojson(x));
+
+ // Add more data
+ for (var i = N; i < 2*N; i++) {
+ db['foo'].insert({x: i, text: Text})
+ var x = db.getLastErrorObj(2, 30000) // wait to be copied to at least one secondary
+ if (i % 30 == 0) print(i)
+ if (i % 100 == 0 || x.err != null) printjson(x);
+ assert.eq(x.err, null, tojson(x));
+ }
+
+ // take down the slave and make sure it fails over
+ repset1.stop(1);
+ repset1.stop(2);
+ db.getMongo().setSlaveOk();
+ print("trying some queries");
+ assert.soon(function() { try {
+ db.foo.find().next();
+ }
+ catch(e) {
+ print(e);
+ return false;
+ }
+ return true;
+ });
+
+ // Done
+ routerSpec.end()
+ configsetSpec.end()
+ repset1.stopSet()
+
+ print('shard_insert_getlasterror_w2.js SUCCESS')
+}
+
+//Uncomment below to execute
+go()
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 0edb7a7..e2b287e 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -2,7 +2,7 @@
s = new ShardingTest( "sort1" , 2 , 0 , 2 )
s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+s.adminCommand( { shardcollection : "test.data" , key : { 'sub.num' : 1 } } );
db = s.getDB( "test" );
@@ -11,16 +11,16 @@ N = 100
forward = []
backward = []
for ( i=0; i<N; i++ ){
- db.data.insert( { _id : i , num : i , x : N - i } )
+ db.data.insert( { _id : i , sub: {num : i , x : N - i }} )
forward.push( i )
backward.push( ( N - 1 ) - i )
}
db.getLastError();
-s.adminCommand( { split : "test.data" , middle : { num : 33 } } )
-s.adminCommand( { split : "test.data" , middle : { num : 66 } } )
+s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } )
+s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } )
-s.adminCommand( { movechunk : "test.data" , find : { num : 50 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.adminCommand( { movechunk : "test.data" , find : { 'sub.num' : 50 } , to : s.getOther( s.getServer( "test" ) ).name } );
assert.eq( 3 , s.config.chunks.find().itcount() , "A1" );
@@ -28,31 +28,31 @@ temp = s.config.chunks.find().sort( { min : 1 } ).toArray();
assert.eq( temp[0].shard , temp[2].shard , "A2" );
assert.neq( temp[0].shard , temp[1].shard , "A3" );
-temp = db.data.find().sort( { num : 1 } ).toArray();
+temp = db.data.find().sort( { 'sub.num' : 1 } ).toArray();
assert.eq( N , temp.length , "B1" );
for ( i=0; i<100; i++ ){
- assert.eq( i , temp[i].num , "B2" )
+ assert.eq( i , temp[i].sub.num , "B2" )
}
-db.data.find().sort( { num : 1 } ).toArray();
-s.getServer("test").getDB( "test" ).data.find().sort( { num : 1 } ).toArray();
+db.data.find().sort( { 'sub.num' : 1 } ).toArray();
+s.getServer("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray();
-a = Date.timeFunc( function(){ z = db.data.find().sort( { num : 1 } ).toArray(); } , 200 );
+a = Date.timeFunc( function(){ z = db.data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
assert.eq( 100 , z.length , "C1" )
-b = 1.5 * Date.timeFunc( function(){ z = s.getServer("test").getDB( "test" ).data.find().sort( { num : 1 } ).toArray(); } , 200 );
+b = 1.5 * Date.timeFunc( function(){ z = s.getServer("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
assert.eq( 67 , z.length , "C2" )
print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" )
// -- secondary index sorting
-function getSorted( by , want , dir , proj ){
+function getSorted( by , dir , proj ){
var s = {}
s[by] = dir || 1;
printjson( s )
var cur = db.data.find( {} , proj || {} ).sort( s )
- return terse( cur.map( function(z){ return z[want]; } ) );
+ return terse( cur.map( function(z){ return z.sub.num; } ) );
}
function terse( a ){
@@ -68,14 +68,22 @@ function terse( a ){
forward = terse(forward);
backward = terse(backward);
-assert.eq( forward , getSorted( "num" , "num" , 1 ) , "D1" )
-assert.eq( backward , getSorted( "num" , "num" , -1 ) , "D2" )
+assert.eq( forward , getSorted( "sub.num" , 1 ) , "D1" )
+assert.eq( backward , getSorted( "sub.num" , -1 ) , "D2" )
-assert.eq( backward , getSorted( "x" , "num" , 1 ) , "D3" )
-assert.eq( forward , getSorted( "x" , "num" , -1 ) , "D4" )
+assert.eq( backward , getSorted( "sub.x" , 1 ) , "D3" )
+assert.eq( forward , getSorted( "sub.x" , -1 ) , "D4" )
-assert.eq( backward , getSorted( "x" , "num" , 1 , { num : 1 } ) , "D5" )
-assert.eq( forward , getSorted( "x" , "num" , -1 , { num : 1 } ) , "D6" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub.num' : 1 } ) , "D5" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub.num' : 1 } ) , "D6" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub' : 1 } ) , "D7" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub' : 1 } ) , "D8" )
+
+assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0 } ) , "D9" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0 } ) , "D10" )
+
+assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D11" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" )
s.stop();
diff --git a/jstests/sharding/splitpick.js b/jstests/sharding/splitpick.js
deleted file mode 100644
index 3733906..0000000
--- a/jstests/sharding/splitpick.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// splitpick.js
-
-/**
-* tests picking the middle to split on
-*/
-
-s = new ShardingTest( "splitpick" , 2 );
-
-db = s.getDB( "test" );
-
-s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.foo" , key : { a : 1 } } );
-
-c = db.foo;
-
-for ( var i=1; i<20; i++ ){
- c.save( { a : i } );
-}
-c.save( { a : 99 } );
-db.getLastError();
-
-function checkSplit( f, want , num ){
- x = s.admin.runCommand( { splitvalue : "test.foo" , find : { a : f } } );
- assert.eq( want, x.middle ? x.middle.a : null , "splitvalue " + num + " " + tojson( x ) );
-}
-
-checkSplit( 1 , 1 , "1" )
-checkSplit( 3 , 1 , "2" )
-
-s.adminCommand( { split : "test.foo" , find : { a : 1 } } );
-checkSplit( 3 , 99 , "3" )
-s.adminCommand( { split : "test.foo" , find : { a : 99 } } );
-
-assert.eq( s.config.chunks.count() , 3 );
-s.printChunks();
-
-checkSplit( 50 , 10 , "4" )
-
-s.stop();
diff --git a/jstests/sharding/sync1.js b/jstests/sharding/sync1.js
index e649387..2c1a8f7 100644
--- a/jstests/sharding/sync1.js
+++ b/jstests/sharding/sync1.js
@@ -13,14 +13,23 @@ assert.eq( 2 , t.find().count() , "A4" );
test.checkHashes( "test" , "A3" );
test.tempKill();
-assert.throws( function(){ t.save( { x : 3 } ) } , "B1" )
+assert.throws( function(){ t.save( { x : 3 } ) } , null , "B1" )
assert.eq( 2 , t.find().itcount() , "B2" );
test.tempStart();
test.checkHashes( "test" , "B3" );
-
assert.eq( 2 , t.find().itcount() , "C1" );
-t.remove( { x : 1 } )
+assert.soon( function(){
+ try {
+ t.remove( { x : 1 } )
+ return true;
+ }
+ catch ( e ){
+ print( e );
+ }
+ return false;
+} )
+t.find().forEach( printjson )
assert.eq( 1 , t.find().itcount() , "C2" );
test.stop();
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js
index 63d4bf6..6359050 100644
--- a/jstests/sharding/update1.js
+++ b/jstests/sharding/update1.js
@@ -42,5 +42,12 @@ assert.eq(err.code, 13123, 'key error code 2');
coll.update({_id:1, key:1}, {$set: {foo:2}});
assert.isnull(db.getLastError(), 'getLastError reset');
+coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
+assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" )
+
+coll.update( { key : 18 } , { $inc : { x : 5 } } , true , true );
+assert.eq( 5 , coll.findOne( { key : 18 } ).x , "up2" )
+
+
s.stop()
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index 0516aff..a16ead3 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -2,22 +2,46 @@
s = new ShardingTest( "version1" , 1 , 2 )
+s.adminCommand( { enablesharding : "alleyinsider" } );
+s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
+
+// alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
+s.printShardingStatus();
+
a = s._connections[0].getDB( "admin" );
assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).ok == 0 );
+
assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : "a" } ).ok == 0 );
+
assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , authoritative : true } ).ok == 0 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 , "should have failed b/c no auth" );
-assert.commandWorked( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ) , "should have worked" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : "a" , version : 2 } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 ,
+ "should have failed b/c no auth" );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ) ,
+ "should have failed because first setShardVersion needs shard info" );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true ,
+ shard: "shard0000" , shardHost: "localhost:30000" } ) ,
+ "should have failed because version is config is 1|0" );
+
+assert.commandWorked( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
+ version : new NumberLong( 4294967296 ), // 1|0
+ authoritative : true , shard: "shard0000" , shardHost: "localhost:30000" } ) ,
+ "should have worked" );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : "a" , version : 2 } ).ok == 0 , "A" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 1 } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 , "B" );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 1 } ).ok == 0 , "C" );
-assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+// the only way that setSharVersion passes is if the shard agrees with the version
+// the shard takes its version from config directly
+// TODO bump timestamps in config
+// assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
-assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
-assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
s.stop();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 9683c92..f502fd3 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -2,6 +2,10 @@
s = new ShardingTest( "version2" , 1 , 2 )
+s.adminCommand( { enablesharding : "alleyinsider" } );
+s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
+s.adminCommand( { shardcollection : "alleyinsider.bar" , key : { num : 1 } } );
+
a = s._connections[0].getDB( "admin" );
// setup from one client
@@ -9,28 +13,41 @@ a = s._connections[0].getDB( "admin" );
assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i == 0 );
assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i == 0 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ).ok == 1 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , authoritative : true ,
+ version : new NumberLong( 4294967296 ), // 1|0
+ shard: "shard0000" , shardHost: "localhost:30000" } ).ok == 1 );
+
+printjson( s.config.chunks.findOne() );
-assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i == 2 );
-assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i == 2 );
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.t == 1000 );
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t == 1000 );
// from another client
a2 = connect( s._connections[0].name + "/admin" );
-assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i , 2 , "a2 global 1" );
+assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t , 1000 , "a2 global 1" );
assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i , 0 , "a2 mine 1" );
function simpleFindOne(){
return a2.getMongo().getDB( "alleyinsider" ).foo.findOne();
}
-assert.commandWorked( a2.runCommand( { "setShardVersion" : "alleyinsider.bar" , configdb : s._configDB , version : 2 , authoritative : true } ) , "setShardVersion bar temp");
+assert.commandWorked( a2.runCommand( { "setShardVersion" : "alleyinsider.bar" , configdb : s._configDB , version : new NumberLong( 4294967296 ) , authoritative : true } ) , "setShardVersion bar temp");
+
assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1" );
-assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1");
-simpleFindOne(); // now should run ok
-assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2");
-simpleFindOne(); // newer version is ok
+
+
+// the only way that setSharVersion passes is if the shard agrees with the version
+// the shard takes its version from config directly
+// TODO bump timestamps in config
+// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1");
+
+// simpleFindOne(); // now should run ok
+
+// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2");
+
+// simpleFindOne(); // newer version is ok
s.stop();