author | Antonin Kral <a.kral@bobek.cz> | 2010-08-11 12:38:57 +0200
---|---|---
committer | Antonin Kral <a.kral@bobek.cz> | 2010-08-11 12:38:57 +0200
commit | 7645618fd3914cb8a20561625913c20d49504a49 (patch) |
tree | 8370f846f58f6d71165b7a0e2eda04648584ec76 /jstests/sharding |
parent | 68c73c3c7608b4c87f07440dc3232801720b1168 (diff) |
download | mongodb-7645618fd3914cb8a20561625913c20d49504a49.tar.gz |
Imported Upstream version 1.6.0
Diffstat (limited to 'jstests/sharding')
31 files changed, 1109 insertions, 142 deletions
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
new file mode 100644
index 0000000..f28feed
--- /dev/null
+++ b/jstests/sharding/addshard1.js
@@ -0,0 +1,56 @@
+s = new ShardingTest( "add_shard1", 1 );
+
+assert.eq( 1, s.config.shards.count(), "initial server count wrong" );
+
+// create a shard and add a database; if the database is not duplicated the mongod should be
+// accepted as a shard
+conn1 = startMongodTest( 29000 );
+
+db1 = conn1.getDB( "testDB" );
+numObjs = 0;
+for (i=0; i<3; i++){
+    db1.foo.save( { a : i } );
+    numObjs++;
+}
+db1.getLastError()
+
+newShard = "myShard";
+assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).ok, "did not accept non-duplicated shard" );
+
+// a mongod with an existing database name should not be allowed to become a shard
+conn2 = startMongodTest( 29001 );
+db2 = conn2.getDB( "otherDB" );
+db2.foo.save( {a:1} );
+db2.getLastError()
+db3 = conn2.getDB( "testDB" );
+db3.foo.save( {a:1} );
+db3.getLastError()
+
+s.config.databases.find().forEach( printjson )
+rejectedShard = "rejectedShard";
+assert( ! s.admin.runCommand( { addshard: "localhost:29001" , name : rejectedShard } ).ok, "accepted mongod with duplicate db" );
+
+// check that all collections that were local to the mongod are accessible through the mongos
+sdb1 = s.getDB( "testDB" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count for database that existed before addshard" );
+sdb2 = s.getDB( "otherDB" );
+assert.eq( 0 , sdb2.foo.count() , "database of rejected shard appears through mongos" );
+
+// make sure we can move a DB from the original mongod to a previously existing shard
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), newShard , "DB primary is wrong" );
+origShard = s.getNonPrimaries( "testDB" )[0];
+s.adminCommand( { moveprimary : "testDB" , to : origShard } );
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), origShard , "DB primary didn't move" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving database that existed before addshard" );
+
+// make sure we can shard the original collections
+sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+s.adminCommand( { enablesharding : "testDB" } );
+s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
+s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
+assert.eq( 2 , s.config.chunks.count(), "wrong chunk number after splitting collection that existed before" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after splitting collection that existed before" );
+
+stopMongod( 29000 );
+stopMongod( 29001 );
+s.stop();
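The flow addshard1.js exercises — seed a standalone mongod, register it with `addshard`, then verify the adopted database through mongos — reduces to a short sketch. The port and names below are illustrative only, reusing helpers the suite itself provides (`startMongodTest`, `s.admin`, `s.normalize`):

```js
// Hedged sketch of the addshard flow; port 29010 and the names are hypothetical.
var conn = startMongodTest( 29010 );               // standalone mongod with pre-existing data
conn.getDB( "seedDB" ).things.save( { a : 1 } );
conn.getDB( "seedDB" ).getLastError();

var res = s.admin.runCommand( { addshard : "localhost:29010" , name : "seedShard" } );
assert( res.ok , "addshard failed: " + tojson( res ) );

// the adopted database should now be visible through mongos, owned by the new shard
assert.eq( "seedShard" , s.normalize( s.config.databases.findOne( { _id : "seedDB" } ).primary ) );
```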
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 92a4ce8..346c43a 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -14,38 +14,50 @@ coll = db.foo;
 var i=0;
-for ( ; i<500; i++ ){
+for ( ; i<100; i++ ){
     coll.save( { num : i , s : bigString } );
 }
-
-s.adminCommand( "connpoolsync" );
+db.getLastError();
 
 primary = s.getServer( "test" ).getDB( "test" );
 
-assert.eq( 1 , s.config.chunks.count() );
-assert.eq( 500 , primary.foo.count() );
+counts = []
+
+s.printChunks();
+counts.push( s.config.chunks.count() );
+assert.eq( 100 , primary.foo.count() );
 
 print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
 
-for ( ; i<800; i++ ){
+for ( ; i<200; i++ ){
     coll.save( { num : i , s : bigString } );
 }
 
-assert.eq( 1 , s.config.chunks.count() );
+s.printChunks()
+counts.push( s.config.chunks.count() );
 
-for ( ; i<1500; i++ ){
+for ( ; i<400; i++ ){
     coll.save( { num : i , s : bigString } );
 }
 
-assert.eq( 3 , s.config.chunks.count() , "shard didn't split A " );
 s.printChunks();
+counts.push( s.config.chunks.count() );
 
-for ( ; i<3000; i++ ){
+for ( ; i<700; i++ ){
     coll.save( { num : i , s : bigString } );
 }
+db.getLastError();
 
-assert.eq( 4 , s.config.chunks.count() , "shard didn't split B " );
 s.printChunks();
+counts.push( s.config.chunks.count() );
+
+assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) )
+sorted = counts.slice(0)
+sorted.sort();
+assert.eq( counts , sorted , "counts 2 : " + tojson( counts ) )
+
+print( counts )
+printjson( db.stats() )
 
 s.stop();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index c6ec374..5ac9cd9 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -1,6 +1,6 @@
 // auto2.js
 
-s = new ShardingTest( "auto2" , 2 , 1 , 1 );
+s = new ShardingTest( "auto2" , 2 , 5 , 2 );
 
 s.adminCommand( { enablesharding : "test" } );
 s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
@@ -26,19 +26,116 @@ for ( j=0; j<30; j++ ){
                  ) );
 }
+assert.eq( i , j * 100 , "setup" );
 
 s.adminCommand( "connpoolsync" );
+db.getLastError();
 
 print( "done inserting data" );
 
 print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
 s.printChunks();
 
-counta = s._connections[0].getDB( "test" ).foo.count();
-countb = s._connections[1].getDB( "test" ).foo.count();
+function doCountsGlobal(){
+    counta = s._connections[0].getDB( "test" ).foo.count();
+    countb = s._connections[1].getDB( "test" ).foo.count();
+    return counta + countb;
+}
+
+doCountsGlobal()
+
+assert( counta > 0 , "diff1" );
+assert( countb > 0 , "diff2" );
+
+print( "checkpoint B" )
+
+var missing = [];
+
+for ( i=0; i<j*100; i++ ){
+    var x = coll.findOne( { num : i } );
+    if ( ! x ){
+        missing.push( i );
+        print( "can't find: " + i );
+        sleep( 5000 );
+        x = coll.findOne( { num : i } );
+        if ( ! x ){
+            print( "still can't find: " + i );
+
+            for ( var zzz=0; zzz<s._connections.length; zzz++ ){
+                if ( s._connections[zzz].getDB( "test" ).foo.findOne( { num : i } ) ){
+                    print( "found on wrong server: " + s._connections[zzz] );
+                }
+            }
+
+        }
+    }
+}
-assert.eq( j * 100 , counta + countb , "from each a:" + counta + " b:" + countb + " i:" + i );
+
+s.printChangeLog();
+
+print( "missing: " + tojson( missing ) )
+assert.soon( function(z){ return doCountsGlobal() == j * 100; } , "from each a:" + counta + " b:" + countb + " i:" + i );
+print( "checkpoint B.a" )
+s.printChunks();
 
 assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
+assert.eq( j * 100 , counta + countb , "from each 2 a:" + counta + " b:" + countb + " i:" + i );
+assert( missing.length == 0 , "missing : " + tojson( missing ) );
+
+print( "checkpoint C" )
 
 assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
 
+for ( i=0; i<100; i++ ){
+    cursor = coll.find().batchSize(5);
+    cursor.next();
+    cursor = null;
+    gc();
+}
+
+print( "checkpoint D")
+
+// test not-sharded cursors
+db = s.getDB( "test2" );
+t = db.foobar;
+for ( i =0; i<100; i++ )
+    t.save( { _id : i } );
+for ( i=0; i<100; i++ ){
+    t.find().batchSize( 2 ).next();
+    assert.lt( 0 , db.runCommand( "cursorInfo" ).totalOpen , "cursor1" );
+    gc();
+}
+
+for ( i=0; i<100; i++ ){
+    gc();
+}
+assert.eq( 0 , db.runCommand( "cursorInfo" ).totalOpen , "cursor2" );
+
+print( "checkpoint E")
+
+x = db.runCommand( "connPoolStats" );
+for ( host in x.hosts ){
+    var foo = x.hosts[host];
+    assert.lt( 0 , foo.available , "pool: " + host );
+}
+
+print( "checkpoint F")
+
+assert( t.findOne() , "check close 0" );
+
+for ( i=0; i<20; i++ ){
+    temp = new Mongo( db.getMongo().host )
+    temp2 = temp.getDB( "test2" ).foobar;
+    assert.eq( temp._fullNameSpace , t._fullNameSpace , "check close 1" );
+    assert( temp2.findOne() , "check close 2" );
+    temp = null;
+    gc();
+}
+
+print( "checkpoint G")
+
+assert.throws( function(){ s.getDB( "test" ).foo.find().sort( { s : 1 } ).forEach( printjsononeline ) } )
+
+print( "checkpoint H")
+
 s.stop();
diff --git a/jstests/sharding/bigMapReduce.js b/jstests/sharding/bigMapReduce.js
new file mode 100644
index 0000000..1cc12f4
--- /dev/null
+++ b/jstests/sharding/bigMapReduce.js
@@ -0,0 +1,17 @@
+s = new ShardingTest( "bigMapReduce" , 2 , 1 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { shardcollection : "test.foo", key : { "_id" : 1 } } )
+
+db = s.getDB( "test" );
+var str=""
+for (i=0;i<4*1024;i++) { str=str+"a"; }
+for (j=0; j<50; j++) for (i=0; i<512; i++){ db.foo.save({y:str})}
+
+function map() { emit('count', 1); }
+function reduce(key, values) { return Array.sum(values) }
+
+out = db.foo.mapReduce(map, reduce)
+printjson(out) // SERVER-1400
+
+s.stop()
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index a697162..ed69d1f 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,7 +1,6 @@
 // count1.js
 
-s = new ShardingTest( "count1" , 2 );
-
+s = new ShardingTest( "count1" , 2 , 1 );
 db = s.getDB( "test" );
 
 db.bar.save( { n : 1 } )
@@ -15,16 +14,16 @@ s.adminCommand( { enablesharding : "test" } )
 s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
 
 primary = s.getServer( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
 
 assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
 
-db.foo.save( { name : "eliot" } )
-db.foo.save( { name : "sara" } )
-db.foo.save( { name : "bob" } )
-db.foo.save( { name : "joe" } )
-db.foo.save( { name : "mark" } )
-db.foo.save( { name : "allan" } )
+db.foo.save( { _id : 1 , name : "eliot" } )
+db.foo.save( { _id : 2 , name : "sara" } )
+db.foo.save( { _id : 3 , name : "bob" } )
+db.foo.save( { _id : 4 , name : "joe" } )
+db.foo.save( { _id : 5 , name : "mark" } )
+db.foo.save( { _id : 6 , name : "allan" } )
 
 assert.eq( 6 , db.foo.find().count() , "basic count" );
 
@@ -35,20 +34,57 @@ s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
 assert.eq( 6 , db.foo.find().count() , "basic count after split " );
 assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
 
-s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : secondary.getMongo().name } );
 
 assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
-assert.eq( 3 , seconday.foo.find().toArray().length , "secondary count" );
+assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
 assert.eq( 3 , primary.foo.find().sort( { name : 1 } ).toArray().length , "primary count sorted" );
-assert.eq( 3 , seconday.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
+assert.eq( 3 , secondary.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
 
 assert.eq( 6 , db.foo.find().toArray().length , "total count after move" );
 assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count() sorted" );
 
 assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count() after move" );
 
-assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
-assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
+function nameString( c ){
+    var s = "";
+    while ( c.hasNext() ){
+        var o = c.next();
+        if ( s.length > 0 )
+            s += ",";
+        s += o.name;
+    }
+    return s;
+}
+
+assert.eq( "allan,bob,eliot,joe,mark,sara" , nameString( db.foo.find().sort( { name : 1 } ) ) , "sort 1" );
+assert.eq( "sara,mark,joe,eliot,bob,allan" , nameString( db.foo.find().sort( { name : -1 } ) ) , "sort 2" );
+
+assert.eq( 2 , db.foo.find().limit(2).itcount() , "LS1" )
+assert.eq( 2 , db.foo.find().skip(2).limit(2).itcount() , "LS2" )
+assert.eq( 1 , db.foo.find().skip(5).limit(2).itcount() , "LS3" )
+assert.eq( 6 , db.foo.find().limit(2).count() , "LSC1" )
+assert.eq( 2 , db.foo.find().limit(2).size() , "LSC2" )
+assert.eq( 2 , db.foo.find().skip(2).limit(2).size() , "LSC3" )
+assert.eq( 1 , db.foo.find().skip(5).limit(2).size() , "LSC4" )
+
+assert.eq( "allan,bob" , nameString( db.foo.find().sort( { name : 1 } ).limit(2) ) , "LSD1" )
+assert.eq( "bob,eliot" , nameString( db.foo.find().sort( { name : 1 } ).skip(1).limit(2) ) , "LSD2" )
+assert.eq( "joe,mark" , nameString( db.foo.find().sort( { name : 1 } ).skip(3).limit(2) ) , "LSD3" )
+
+assert.eq( "eliot,sara" , nameString( db.foo.find().sort( { _id : 1 } ).limit(2) ) , "LSE1" )
+assert.eq( "sara,bob" , nameString( db.foo.find().sort( { _id : 1 } ).skip(1).limit(2) ) , "LSE2" )
+assert.eq( "joe,mark" , nameString( db.foo.find().sort( { _id : 1 } ).skip(3).limit(2) ) , "LSE3" )
+
+for ( i=0; i<10; i++ ){
+    db.foo.save( { _id : 7 + i , name : "zzz" + i } )
+}
+
+assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).itcount() , "LSF1" )
+assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).itcount() , "LSF2" )
+assert.eq( 5 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).itcount() , "LSF3" )
+sleep( 5000 )
+assert.eq( 3 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).limit(3).itcount() , "LSF4" )
 
 s.stop();
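The new LS/LSC/LSD assertions in count1.js lean on how mongos evaluates skip/limit across shards: each shard can only be asked for the first skip+limit documents in its sort order, so the skip must be applied globally during the merge. A toy model of that rule in plain JavaScript (a sketch of the semantics, not mongos internals):

```js
// Toy model of the global skip/limit merge; shardResults stands in for
// per-shard batches that each already honor limit = skip + limit.
function mergeSkipLimit( shardResults , skip , limit , cmp ){
    var merged = [].concat.apply( [] , shardResults ); // flatten the per-shard arrays
    merged.sort( cmp );                                // merge step (sort is enough for a sketch)
    return merged.slice( skip , skip + limit );        // skip applied once, globally
}

// skip(1).limit(2) over two shards' sorted batches
var byName = function( x , y ){ return x.name < y.name ? -1 : 1; };
printjson( mergeSkipLimit( [ [ { name : "allan" } , { name : "joe" } ] ,
                             [ { name : "bob" } , { name : "sara" } ] ] , 1 , 2 , byName ) );
```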
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
new file mode 100644
index 0000000..2d9507e
--- /dev/null
+++ b/jstests/sharding/count2.js
@@ -0,0 +1,43 @@
+// count2.js
+
+s1 = new ShardingTest( "count2" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding: "test" } );
+s1.adminCommand( { shardcollection: "test.foo" , key : { name : 1 } } );
+
+db1 = s1.getDB( "test" ).foo;
+db2 = s2.getDB( "test" ).foo;
+
+assert.eq( 1, s1.config.chunks.count(), "sanity check A");
+
+db1.save( { name : "aaa" } )
+db1.save( { name : "bbb" } )
+db1.save( { name : "ccc" } )
+db1.save( { name : "ddd" } )
+db1.save( { name : "eee" } )
+db1.save( { name : "fff" } )
+
+s1.adminCommand( { split : "test.foo" , middle : { name : "ddd" } } );
+
+assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos1" );
+assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos2" );
+
+s1.printChunks( "test.foo" )
+
+s1.adminCommand( { movechunk : "test.foo" , find : { name : "aaa" } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+
+assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "post count mongos1" );
+
+// The second mongos still thinks its shard mapping is valid and accepts a count
+print( "before sleep: " + Date() )
+sleep( 2000 )
+print( "after sleep: " + Date() )
+s1.printChunks( "test.foo" )
+assert.eq( 3, db2.find( { name : { $gte: "aaa" , $lt: "ddd" } } ).count() , "post count mongos2" );
+
+db2.findOne();
+
+assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) );
+
+s1.stop();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
new file mode 100644
index 0000000..2a30936
--- /dev/null
+++ b/jstests/sharding/cursor1.js
@@ -0,0 +1,60 @@
+// cursor1.js
+// checks that cursors survive a chunk's move
+
+s = new ShardingTest( "sharding_cursor1" , 2 , 2 )
+
+// take the balancer out of the equation
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+s.config.settings.find().forEach( printjson )
+
+// create a sharded 'test.foo', for the moment with just one chunk
+s.adminCommand( { enablesharding: "test" } );
+s.adminCommand( { shardcollection: "test.foo", key: { _id: 1 } } )
+
+db = s.getDB( "test" );
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+numObjs = 10;
+for (i=0; i < numObjs; i++){
+    db.foo.insert({_id: i});
+}
+db.getLastError();
+assert.eq( 1, s.config.chunks.count() , "test requires collection to have one chunk initially" );
+
+// we'll split the collection in two and move the second chunk while three cursors are open
+// cursor1 still has more data in the first chunk, the one that didn't move
+// cursor2 buffered the last obj of the first chunk
+// cursor3 buffered data that was moved on the second chunk
+var cursor1 = db.foo.find().batchSize( 3 );
+assert.eq( 3 , cursor1.objsLeftInBatch() );
+var cursor2 = db.foo.find().batchSize( 5 );
+assert.eq( 5 , cursor2.objsLeftInBatch() );
+var cursor3 = db.foo.find().batchSize( 7 );
+assert.eq( 7 , cursor3.objsLeftInBatch() );
+
+s.adminCommand( { split: "test.foo" , middle : { _id : 5 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { _id : 5 } , to : secondary.getMongo().name } );
+assert.eq( 2, s.config.chunks.count() );
+
+// the cursors should not have been affected
+assert.eq( numObjs , cursor1.itcount() , "c1" );
+assert.eq( numObjs , cursor2.itcount() , "c2" );
+assert.eq( numObjs , cursor3.itcount() , "c3" );
+
+// test timeout
+gc(); gc();
+cur = db.foo.find().batchSize( 2 )
+assert( cur.next() , "T1" )
+assert( cur.next() , "T2" );
+before = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+printjson( before )
+sleep( 6000 )
+assert( cur.next() , "T3" )
+assert( cur.next() , "T4" );
+sleep( 22000 )
+assert.throws( function(){ cur.next(); } , "T5" )
+after = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+gc(); gc()
+
+s.stop()
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 6497bc0..31fd75a 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -14,7 +14,7 @@ assert( 3 , test1.count() );
 
 assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
 assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" } ).ok , "host not up" );
-assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" , allowLocal : true } ).ok , "host not up" );
+assert( ! s.admin.runCommand( { addshard: "10.0.0.1:43415" } ).ok , "allowed shard in IP when config is localhost" );
 
 s.stop();
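The reason cursor1.js's three cursors all return numObjs is that documents already buffered client-side are served from memory, and a migration does not invalidate what a shard-side cursor has already handed out. A minimal restatement of the buffering half of that claim, using the same `objsLeftInBatch()` helper the test relies on:

```js
// Sketch: a batch fetched before a migration is unaffected by it.
var cur = db.foo.find().batchSize( 3 );   // first 3 docs are buffered immediately
assert.eq( 3 , cur.objsLeftInBatch() );
// ... a split + movechunk could happen here ...
cur.next();                               // served from the client-side buffer
assert.eq( 2 , cur.objsLeftInBatch() );   // no server round trip was needed
```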
diff --git a/jstests/sharding/error1.js b/jstests/sharding/error1.js
index b4db9c3..e1aae06 100644
--- a/jstests/sharding/error1.js
+++ b/jstests/sharding/error1.js
@@ -18,19 +18,21 @@ assert( db.getLastError() , "gle22" );
 
 s.adminCommand( { shardcollection : "test.foo2" , key : { num : 1 } } );
 
-db.foo2.insert( { _id : 1 , num : 5 } );
-db.foo2.insert( { _id : 2 , num : 10 } );
-db.foo2.insert( { _id : 3 , num : 15 } );
-db.foo2.insert( { _id : 4 , num : 20 } );
+db.foo2.save( { _id : 1 , num : 5 } );
+db.foo2.save( { _id : 2 , num : 10 } );
+db.foo2.save( { _id : 3 , num : 15 } );
+db.foo2.save( { _id : 4 , num : 20 } );
 
 s.adminCommand( { split : "test.foo2" , middle : { num : 10 } } );
 s.adminCommand( { movechunk : "test.foo2" , find : { num : 20 } , to : s.getOther( s.getServer( "test" ) ).name } );
 
+print( "a: " + a.foo2.count() );
+print( "b: " + b.foo2.count() );
 assert( a.foo2.count() > 0 && a.foo2.count() < 4 , "se1" );
 assert( b.foo2.count() > 0 && b.foo2.count() < 4 , "se2" );
 assert.eq( 4 , db.foo2.count() , "se3" );
 
-db.foo2.insert( { _id : 5 , num : 25 } );
+db.foo2.save( { _id : 5 , num : 25 } );
 assert( ! db.getLastError() , "se3.5" );
 s.sync();
 assert.eq( 5 , db.foo2.count() , "se4" );
@@ -43,5 +45,17 @@ assert( db.getLastError() , "se6" );
 
 assert.eq( 5 , db.foo2.count() , "se5" );
 
+
+// assert in mongos
+s.adminCommand( { shardcollection : "test.foo3" , key : { num : 1 } } );
+assert.isnull(db.getLastError() , "gle C1" );
+
+db.foo3.insert({}); //this fails with no shard key error
+assert(db.getLastError() , "gle C2a" );
+assert(db.getLastError() , "gle C2b" );
+
+db.foo3.insert({num:1});
+assert.isnull(db.getLastError() , "gle C3a" );
+
 // ----
 
 s.stop();
s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" ); + +// --- listDatabases --- + +r = db.getMongo().getDBs() +assert.eq( 4 , r.databases.length , "listDatabases 1 : " + tojson( r ) ) +assert.lt( 10000 , r.totalSize , "listDatabases 2 : " + tojson( r ) ); + s.stop() diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js index 47fedc8..dfb2883 100644 --- a/jstests/sharding/features2.js +++ b/jstests/sharding/features2.js @@ -13,6 +13,7 @@ db = s.getDB( "test" ); db.foo.save( { x : 1 } ); db.foo.save( { x : 2 } ); db.foo.save( { x : 3 } ); +db.foo.ensureIndex( { x : 1 } ); assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 1" ); assert( a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3 , "distinct 2" ); @@ -51,25 +52,27 @@ assert.eq( 0 , db.foo.count() , "D7" ); // --- _id key --- -db.foo2.insert( { _id : new ObjectId() } ); -db.foo2.insert( { _id : new ObjectId() } ); -db.foo2.insert( { _id : new ObjectId() } ); +db.foo2.save( { _id : new ObjectId() } ); +db.foo2.save( { _id : new ObjectId() } ); +db.foo2.save( { _id : new ObjectId() } ); +db.getLastError(); assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" ); +printjson( db.system.indexes.find( { ns : "test.foo2" } ).toArray() ); s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } ); assert.eq( 3 , db.foo2.count() , "F2" ) db.foo2.insert( {} ); assert.eq( 4 , db.foo2.count() , "F3" ) - // --- map/reduce db.mr.save( { x : 1 , tags : [ "a" , "b" ] } ); db.mr.save( { x : 2 , tags : [ "b" , "c" ] } ); db.mr.save( { x : 3 , tags : [ "c" , "a" ] } ); db.mr.save( { x : 4 , tags : [ "b" , "c" ] } ); +db.mr.ensureIndex( { x : 1 } ); m = function(){ this.tags.forEach( @@ -88,8 +91,12 @@ r = function( key , values ){ }; doMR = function( n ){ + print(n); + var res = db.mr.mapReduce( m , r ); printjson( res ); + assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n ); + var x = db[res.result]; assert.eq( 3 , x.find().count() , "MR T1 " + n ); @@ -111,4 +118,42 @@ assert.eq( 2 , s.onNumShards( "mr" ) , "E1" ); doMR( "after" ); +s.adminCommand({split:'test.mr' , middle:{x:3}} ); +s.adminCommand({split:'test.mr' , middle:{x:4}} ); +s.adminCommand({movechunk:'test.mr', find:{x:3}, to: s.getServer('test').name } ); + +doMR( "after extra split" ); + +cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " }; + +x = db.runCommand( cmd ); +y = s._connections[0].getDB( "test" ).runCommand( cmd ); + +printjson( x ) +printjson( y ) + +// count + +db.countaa.save({"regex" : /foo/i}) +db.countaa.save({"regex" : /foo/i}) +db.countaa.save({"regex" : /foo/i}) +assert.eq( 3 , db.countaa.count() , "counta1" ); +assert.eq( 3 , db.countaa.find().itcount() , "counta1" ); + +x = null; y = null; +try { + x = db.runCommand( "forceerror" ) +} +catch ( e ){ + x = e; +} +try { + y = s._connections[0].getDB( "test" ).runCommand( "forceerror" ); +} +catch ( e ){ + y = e; +} + +assert.eq( x , y , "assert format" ) + s.stop(); diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js new file mode 100644 index 0000000..4ab75ee --- /dev/null +++ b/jstests/sharding/features3.js @@ -0,0 +1,86 @@ + +s = new ShardingTest( "features3" , 2 , 1 , 1 ); +s.adminCommand( { enablesharding : "test" } ); + +a = s._connections[0].getDB( "test" ); +b = s._connections[1].getDB( "test" ); + +db = s.getDB( "test" ); + +// ---------- load some data ----- + +s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); +N = 10000; +s.adminCommand( { 
split : "test.foo" , middle : { _id : N/2 } } ) +s.adminCommand( { moveChunk : "test.foo", find : { _id : 3 } ,to : s.getNonPrimaries( "test" )[0] } ) + +for ( i=0; i<N; i++ ) + db.foo.insert( { _id : i } ) +db.getLastError(); +x = db.foo.stats(); +assert.eq( N , x.count , "total count" ) +assert.eq( N / 2 , x.shards.shard0000.count , "count on shard0000" ) +assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" ) + +start = new Date() + +join = startParallelShell( "db.foo.find( function(){ x = \"\"; for ( i=0; i<5000; i++ ){ x+=i; } return true; } ).itcount()" ) + +function getMine(){ + var inprog = db.currentOp().inprog; + var mine = [] + for ( var x=0; x<inprog.length; x++ ){ + if ( inprog[x].query && inprog[x].query.$where ){ + mine.push( inprog[x] ) + } + } + return mine; +} + +state = 0; // 0 = not found, 1 = killed, +killTime = null; + +for ( i=0; i<100000; i++ ){ + var mine = getMine(); + if ( state == 0 ){ + if ( mine.length == 0 ){ + sleep(1); + continue; + } + state = 1; + mine.forEach( function(z){ printjson( db.getSisterDB( "admin" ).killOp( z.opid ) ); } ) + killTime = new Date() + } + else if ( state == 1 ){ + if ( mine.length == 0 ){ + state = 2; + break; + } + continue; + } +} + +killTime = (new Date()).getTime() - killTime.getTime() +print( "killTime: " + killTime ); + +assert.eq( 2 , state , "failed killing" ); +assert.gt( 3000 , killTime , "took too long to kill" ) + +join() + +end = new Date() + +print( "elapsed: " + ( end.getTime() - start.getTime() ) ); + + +x = db.runCommand( "fsync" ) +assert( ! x.ok , "fsync not on admin should fail : " + tojson( x ) ); +assert( x.errmsg.indexOf( "access denied" ) >= 0 , "fsync not on admin should fail : " + tojson( x ) ) + +x = db._adminCommand( "fsync" ) +assert( x.ok == 1 && x.numFiles > 0 , "fsync failed : " + tojson( x ) ) + +x = db._adminCommand( { "fsync" :1, lock:true } ) +assert( ! 
x.ok , "lock should fail: " + tojson( x ) ) + +s.stop() diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js index 774701f..437ec81 100644 --- a/jstests/sharding/findandmodify1.js +++ b/jstests/sharding/findandmodify1.js @@ -1,53 +1,51 @@ -s = new ShardingTest( "find_and_modify_sharded" , 2 ); +s = new ShardingTest( "find_and_modify_sharded" , 2 , 2); s.adminCommand( { enablesharding : "test" } ); db = s.getDB( "test" ); primary = s.getServer( "test" ).getDB( "test" ); -seconday = s.getOther( primary ).getDB( "test" ); +secondary = s.getOther( primary ).getDB( "test" ); numObjs = 20; s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } ); +// pre-split the collection so to avoid interference from balancer +s.adminCommand( { split: "test.stuff" , middle : { _id : numObjs/2 } } ); +s.adminCommand( { movechunk : "test.stuff" , find : { _id : numObjs/2 } , to : secondary.getMongo().name } ) ; + for (var i=0; i < numObjs; i++){ db.stuff.insert({_id: i}); } +db.getLastError() -for (var i=0; i < numObjs; i+=2){ +// put two docs in each chunk (avoid the split in 0, since there are no docs less than 0) +for (var i=2; i < numObjs; i+=2){ + if (i == numObjs/2) + continue; s.adminCommand( { split: "test.stuff" , middle : {_id: i} } ); } -for (var i=0; i < numObjs; i+=4){ - s.adminCommand( { movechunk : "test.stuff" , find : {_id: i} , to : seconday.getMongo().name } ); -} - -//sorted update -for (var i=0; i < numObjs; i++){ - assert.eq(db.stuff.count({a:1}), i, "1 A"); - - var out = db.stuff.findAndModify({query: {a:null}, update: {$set: {a:1}}, sort: {_id:1}}); - - assert.eq(db.stuff.count({a:1}), i+1, "1 B"); - assert.eq(db.stuff.findOne({_id:i}).a, 1, "1 C"); - assert.eq(out._id, i, "1 D"); -} +s.printChunks(); +assert.eq( numObjs/2, s.config.chunks.count(), "split failed" ); +assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0000" }) ); +assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0001" }) ); -// unsorted update +// update for (var i=0; i < numObjs; i++){ assert.eq(db.stuff.count({b:1}), i, "2 A"); - var out = db.stuff.findAndModify({query: {b:null}, update: {$set: {b:1}}}); + var out = db.stuff.findAndModify({query: {_id:i, b:null}, update: {$set: {b:1}}}); + assert.eq(out._id, i, "2 E"); assert.eq(db.stuff.count({b:1}), i+1, "2 B"); - assert.eq(db.stuff.findOne({_id:out._id}).a, 1, "2 C"); } -//sorted remove (no query) +// remove for (var i=0; i < numObjs; i++){ assert.eq(db.stuff.count(), numObjs - i, "3 A"); assert.eq(db.stuff.count({_id: i}), 1, "3 B"); - var out = db.stuff.findAndModify({remove: true, sort: {_id:1}}); + var out = db.stuff.findAndModify({remove: true, query: {_id:i}}); assert.eq(db.stuff.count(), numObjs - i - 1, "3 C"); assert.eq(db.stuff.count({_id: i}), 0, "3 D"); diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js index d1644ac..1e0ba9d 100644 --- a/jstests/sharding/key_many.js +++ b/jstests/sharding/key_many.js @@ -1,14 +1,18 @@ // key_many.js // values have to be sorted +// you must have exactly 6 values in each array types = [ { name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } , { name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } , { name : "date" , values : [ new Date( 1000000 ) , new Date( 2000000 ) , new Date( 3000000 ) , new Date( 4000000 ) , new Date( 5000000 ) , new Date( 6000000 ) ] , keyfield : "a" } , { name : "string_id" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" 
] , keyfield : "_id" }, - { name : "embedded" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } , + { name : "embedded 1" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } , { name : "embedded 2" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b.c" } , { name : "object" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" } , + { name : "compound" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" , compound : true } , + { name : "oid_id" , values : [ ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() ] , keyfield : "_id" } , + { name : "oid_other" , values : [ ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() ] , keyfield : "o" } , ] s = new ShardingTest( "key_many" , 2 ); @@ -20,7 +24,18 @@ seconday = s.getOther( primary ).getDB( "test" ); function makeObjectDotted( v ){ var o = {}; - o[curT.keyfield] = v; + if (curT.compound){ + var prefix = curT.keyfield + '.'; + if (typeof(v) == 'object'){ + for (key in v) + o[prefix + key] = v[key]; + } else { + for (key in curT.values[0]) + o[prefix + key] = v; + } + } else { + o[curT.keyfield] = v; + } return o; } @@ -39,6 +54,15 @@ function makeObject( v ){ return o; } +function makeInQuery(){ + if (curT.compound){ + // cheating a bit... + return {'o.a': {$in: [1,2]}}; + } else { + return makeObjectDotted({$in: curT.values}); + } +} + function getKey( o ){ var keys = curT.keyfield.split('.'); for(var i=0; i<keys.length; i++){ @@ -85,7 +109,20 @@ for ( var i=0; i<types.length; i++ ){ assert.eq( 6 , c.find().sort( makeObjectDotted( 1 ) ).count() , curT.name + " total count with count()" ); + assert.eq( 2 , c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count() , curT.name + " $or count()" ); + assert.eq( 2 , c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount() , curT.name + " $or itcount()" ); + assert.eq( 4 , c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count() , curT.name + " $nor count()" ); + assert.eq( 4 , c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount() , curT.name + " $nor itcount()" ); + + var stats = c.stats(); + printjson( stats ) + assert.eq( 6 , stats.count , curT.name + " total count with stats()" ); + var count = 0; + for (shard in stats.shards) count += stats.shards[shard].count; + assert.eq( 6 , count , curT.name + " total count with stats() sum" ); + assert.eq( curT.values , c.find().sort( makeObjectDotted( 1 ) ).toArray().map( getKey ) , curT.name + " sort 1" ); + assert.eq( curT.values , c.find(makeInQuery()).sort( makeObjectDotted( 1 ) ).toArray().map( getKey ) , curT.name + " sort 1 - $in" ); assert.eq( curT.values.reverse() , c.find().sort( makeObjectDotted( -1 ) ).toArray().map( getKey ) , curT.name + " sort 2" ); diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js index 20dc6c1..952748e 100644 --- a/jstests/sharding/movePrimary1.js +++ b/jstests/sharding/movePrimary1.js @@ -19,13 +19,26 @@ to = s.getOther( from ); assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data before move" ); assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data before move" ); -assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , 
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 20dc6c1..952748e 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -19,13 +19,26 @@ to = s.getOther( from );
 assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data before move" );
 assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data before move" );
 
-assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , from.name , "not in db correctly to start" );
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ) ,
+           s.normalize( from.name ) , "not in db correctly to start" );
+s.printShardingStatus();
+oldShardName = s.config.databases.findOne( {_id: "test1"} ).primary;
 s.admin.runCommand( { moveprimary : "test1" , to : to.name } );
-assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , to.name , "to in config db didn't change" );
-
+s.printShardingStatus();
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
+           s.normalize( to.name ) , "to in config db didn't change after first move" );
 assert.eq( 0 , from.getDB( "test1" ).foo.count() , "from still has data after move" );
 assert.eq( 3 , to.getDB( "test1" ).foo.count() , "to doesn't have data after move" );
 
+// move back, now using shard name instead of server address
+s.admin.runCommand( { moveprimary : "test1" , to : oldShardName } );
+s.printShardingStatus();
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
+           oldShardName , "to in config db didn't change after second move" );
+
+assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data after move back" );
+assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data after move back" );
+
 s.stop();
diff --git a/jstests/sharding/moveshard1.js b/jstests/sharding/moveshard1.js
deleted file mode 100644
index 9220983..0000000
--- a/jstests/sharding/moveshard1.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// movechunk1.js
-
-s = new ShardingTest( "movechunk1" , 2 );
-
-l = s._connections[0];
-r = s._connections[1];
-
-ldb = l.getDB( "foo" );
-rdb = r.getDB( "foo" );
-
-ldb.things.save( { a : 1 } )
-ldb.things.save( { a : 2 } )
-ldb.things.save( { a : 3 } )
-
-assert.eq( ldb.things.count() , 3 );
-assert.eq( rdb.things.count() , 0 );
-
-startResult = l.getDB( "admin" ).runCommand( { "movechunk.start" : "foo.things" ,
-                                               "to" : s._connections[1].name ,
-                                               "from" : s._connections[0].name ,
-                                               filter : { a : { $gt : 2 } }
-                                             } );
-print( "movechunk.start: " + tojson( startResult ) );
-assert( startResult.ok == 1 , "start failed!" );
-
-finishResult = l.getDB( "admin" ).runCommand( { "movechunk.finish" : "foo.things" ,
-                                                finishToken : startResult.finishToken ,
-                                                to : s._connections[1].name ,
-                                                newVersion : 1 } );
-print( "movechunk.finish: " + tojson( finishResult ) );
-assert( finishResult.ok == 1 , "finishResult failed!" );
-
-assert.eq( rdb.things.count() , 1 , "right has wrong size after move" );
-assert.eq( ldb.things.count() , 2 , "left has wrong size after move" );
-
-
-s.stop();
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
new file mode 100644
index 0000000..6815492
--- /dev/null
+++ b/jstests/sharding/presplit.js
@@ -0,0 +1,37 @@
+// presplit.js
+
+// Starts a new sharding environment limiting the chunksize to 1MB.
+s = new ShardingTest( "presplit" , 2 , 2 , 1 , { chunksize : 1 } );
+
+// Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
+bigString = "";
+while ( bigString.length < 10000 ){
+    bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
+
+db = s.getDB( "test" );
+inserted = 0;
+num = 0;
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+    db.foo.insert( { _id : num++ , s : bigString } );
+    inserted += bigString.length;
+}
+db.getLastError();
+
+// Make sure that there's only one chunk holding all the data.
+s.printChunks();
+primary = s.getServer( "test" ).getDB( "test" );
+assert.eq( 0 , s.config.chunks.count() , "single chunk assertion" );
+assert.eq( num , primary.foo.count() );
+
+// Turn on sharding on the 'test.foo' collection
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+// Make sure the collection's original chunk got split
+s.printChunks();
+assert.lt( 20 , s.config.chunks.count() , "many chunks assertion" );
+assert.eq( num , primary.foo.count() );
+
+s.printChangeLog();
+s.stop();
\ No newline at end of file
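presplit.js lets the `shardcollection` command split a pre-filled collection on its own; when the boundaries should be chosen by hand instead, the same `split`/`middle` command used throughout this suite does it. A hedged sketch, with a made-up database name and split points:

```js
// Manual pre-splitting with explicit boundaries; names and values are hypothetical.
s.adminCommand( { enablesharding : "presplitDB" } );
s.adminCommand( { shardcollection : "presplitDB.coll" , key : { _id : 1 } } );
[ 1000 , 2000 , 3000 ].forEach( function( bound ){
    s.adminCommand( { split : "presplitDB.coll" , middle : { _id : bound } } );
} );
// three splits turn the single initial chunk into four
assert.eq( 4 , s.config.chunks.count( { ns : "presplitDB.coll" } ) );
```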
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
new file mode 100644
index 0000000..9593bdf
--- /dev/null
+++ b/jstests/sharding/remove1.js
@@ -0,0 +1,16 @@
+s = new ShardingTest( "remove_shard1", 2 );
+
+assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
+
+// first remove puts in draining mode, the second actually removes
+assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to start draining shard" );
+assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to remove shard" );
+assert.eq( 1, s.config.shards.count() , "removed server still appears in count" );
+
+// should create a shard0002 shard
+conn = startMongodTest( 29000 );
+assert( s.admin.runCommand( { addshard: "localhost:29000" } ).ok, "failed to add shard" );
+assert.eq( 2, s.config.shards.count(), "new server does not appear in count" );
+
+stopMongod( 29000 );
+s.stop();
\ No newline at end of file
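remove1.js can call `removeshard` exactly twice only because the drained shard holds no chunks. Against a shard that still owns data, one would poll until draining finishes; the reply field checked below (`state`) is an assumption about the command's reply shape, not something this suite verifies:

```js
// Hedged polling sketch; assumes removeshard reports a "state" field while draining.
assert( s.admin.runCommand( { removeshard : "shard0000" } ).ok , "start draining" );
assert.soon( function(){
    var res = s.admin.runCommand( { removeshard : "shard0000" } );
    return res.ok && res.state == "completed";   // assumed reply field
} , "shard never finished draining" );
```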
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
new file mode 100644
index 0000000..aa6137d
--- /dev/null
+++ b/jstests/sharding/rename.js
@@ -0,0 +1,26 @@
+s = new ShardingTest( "rename" , 2 , 1 , 1 );
+db = s.getDB( "test" );
+
+db.foo.insert({_id:1});
+db.foo.renameCollection('bar');
+assert.isnull(db.getLastError(), '1.0');
+assert.eq(db.bar.findOne(), {_id:1}, '1.1');
+assert.eq(db.bar.count(), 1, '1.2');
+assert.eq(db.foo.count(), 0, '1.3');
+
+db.foo.insert({_id:2});
+db.foo.renameCollection('bar', true);
+assert.isnull(db.getLastError(), '2.0');
+assert.eq(db.bar.findOne(), {_id:2}, '2.1');
+assert.eq(db.bar.count(), 1, '2.2');
+assert.eq(db.foo.count(), 0, '2.3');
+
+s.adminCommand( { enablesharding : "test" } );
+
+db.foo.insert({_id:3});
+db.foo.renameCollection('bar', true);
+assert.isnull(db.getLastError(), '3.0');
+assert.eq(db.bar.findOne(), {_id:3}, '3.1');
+assert.eq(db.bar.count(), 1, '3.2');
+assert.eq(db.foo.count(), 0, '3.3');
+
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index bbe1144..1783238 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -8,7 +8,8 @@ db = s.getDB( "test" );
 db.foo.insert( { num : 1 , name : "eliot" } );
 db.foo.insert( { num : 2 , name : "sara" } );
 db.foo.insert( { num : -1 , name : "joe" } );
-assert.eq( 3 , db.foo.find().length() );
+db.foo.ensureIndex( { num : 1 } );
+assert.eq( 3 , db.foo.find().length() , "A" );
 
 shardCommand = { shardcollection : "test.foo" , key : { num : 1 } };
 
@@ -18,10 +19,15 @@ s.adminCommand( { enablesharding : "test" } );
 assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
 
 s.adminCommand( shardCommand );
-dbconfig = s.config.databases.findOne( { name : "test" } );
-assert.eq( dbconfig.sharded["test.foo"] , { key : { num : 1 } , unique : false } , "Sharded content" );
-assert.eq( 1 , s.config.chunks.count() );
+cconfig = s.config.collections.findOne( { _id : "test.foo" } );
+delete cconfig.lastmod
+delete cconfig.dropped
+assert.eq( cconfig , { _id : "test.foo" , key : { num : 1 } , unique : false } , "Sharded content" );
+
+s.config.collections.find().forEach( printjson )
+
+assert.eq( 1 , s.config.chunks.count() , "num chunks A");
 
 si = s.config.chunks.findOne();
 assert( si );
 assert.eq( si.ns , "test.foo" );
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 5932210..09caf39 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -8,7 +8,17 @@ placeCheck = function( num ){
     print("shard2 step: " + num );
 }
 
-s = new ShardingTest( "shard2" , 2 , 6 );
+printAll = function(){
+    print( "****************" );
+    db.foo.find().forEach( printjsononeline )
+    print( "++++++++++++++++++" );
+    primary.foo.find().forEach( printjsononeline )
+    print( "++++++++++++++++++" );
+    secondary.foo.find().forEach( printjsononeline )
+    print( "---------------------" );
+}
+
+s = new ShardingTest( "shard2" , 2 , 2 );
 
 db = s.getDB( "test" );
 
@@ -26,7 +36,7 @@ db.foo.save( { num : 1 , name : "eliot" } );
 db.foo.save( { num : 2 , name : "sara" } );
 db.foo.save( { num : -1 , name : "joe" } );
 
-s.adminCommand( "connpoolsync" );
+db.getLastError();
 
 assert.eq( 3 , s.getServer( "test" ).getDB( "test" ).foo.find().length() , "not right directly to db A" );
 assert.eq( 3 , db.foo.find().length() , "not right on shard" );
@@ -59,18 +69,18 @@ placeCheck( 3 );
 // test inserts go to right server/shard
 
 db.foo.save( { num : 3 , name : "bob" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
 assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
 assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
 
 db.foo.save( { num : -2 , name : "funny man" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
 assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
 assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
 
 db.foo.save( { num : 0 , name : "funny guy" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
 assert.eq( 2 , primary.foo.find().length() , "boundary A" );
 assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
 
@@ -129,6 +139,16 @@ assert.eq( "funny man" , db.foo.find( { num : { $lt : 100 } } ).sort( { num : 1
 
 placeCheck( 7 );
 
+db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } )
+
+zzz = db.foo.find().explain();
+assert.eq( 6 , zzz.nscanned , "EX1a" )
+assert.eq( 6 , zzz.n , "EX1b" )
+
+zzz = db.foo.find().sort( { _id : 1 } ).explain();
+assert.eq( 6 , zzz.nscanned , "EX2a" )
+assert.eq( 6 , zzz.n , "EX2a" )
+
 // getMore
 assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
 function countCursor( c ){
@@ -178,6 +198,19 @@ placeCheck( 8 );
 db.getLastError();
 db.getPrevError();
 
+// more update stuff
+
+printAll();
+total = db.foo.find().count();
+db.foo.update( {} , { $inc : { x : 1 } } , false , true );
+x = db.getLastErrorObj();
+printAll();
+assert.eq( total , x.n , "getLastError n A: " + tojson( x ) );
+
+
+db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
+assert.eq( 1 , db.getLastErrorObj().n , "getLastErrorObj n B" );
+
 // ---- move all to the secondary
 
 assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
@@ -191,4 +224,6 @@ s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : primary.ge
 assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards again" );
 assert.eq( 3 , s.config.chunks.count() , "only 3 chunks" );
 
+print( "YO : " + tojson( db.runCommand( "serverStatus" ) ) );
+
 s.stop();
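The new getLastErrorObj assertions in shard2.js rely on mongos summing the per-shard `n` counts of a multi-update into one total; restated minimally with the same calls the test uses:

```js
// n aggregates documents touched across every shard the multi-update hit.
db.foo.update( {} , { $inc : { x : 1 } } , false , true );   // upsert=false, multi=true
var gle = db.getLastErrorObj();
assert.eq( db.foo.find().count() , gle.n , "each document updated exactly once" );
```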
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 8c5b184..9f0cef4 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,12 +1,14 @@
 // shard3.js
 
-s = new ShardingTest( "shard3" , 2 , 50 , 2 );
+s = new ShardingTest( "shard3" , 2 , 1 , 2 );
 
 s2 = s._mongos[1];
 
 s.adminCommand( { enablesharding : "test" } );
 s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
 
+s.config.databases.find().forEach( printjson )
+
 a = s.getDB( "test" ).foo;
 b = s2.getDB( "test" ).foo;
 
@@ -35,6 +37,8 @@ assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3"
 assert.eq( 3 , a.find().toArray().length , "normal B" );
 assert.eq( 3 , b.find().toArray().length , "other B" );
 
+printjson( primary._db._adminCommand( "shardingState" ) );
+
 // --- filtering ---
 
 function doCounts( name , total ){
@@ -47,8 +51,8 @@ function doCounts( name , total ){
 }
 
 var total = doCounts( "before wrong save" )
-secondary.save( { num : -3 } );
-doCounts( "after wrong save" , total )
+//secondary.save( { num : -3 } );
+//doCounts( "after wrong save" , total )
 
 // --- move all to 1 ---
 print( "MOVE ALL TO 1" );
@@ -60,12 +64,16 @@ assert( a.findOne( { num : 1 } ) )
 assert( b.findOne( { num : 1 } ) )
 
 print( "GOING TO MOVE" );
+assert( a.findOne( { num : 1 } ) , "pre move 1" )
 s.printCollectionInfo( "test.foo" );
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s.getOther( s.getServer( "test" ) ).name } );
+myto = s.getOther( s.getServer( "test" ) ).name
+print( "counts before move: " + tojson( s.shardCounts( "foo" ) ) );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : myto } )
+print( "counts after move: " + tojson( s.shardCounts( "foo" ) ) );
 s.printCollectionInfo( "test.foo" );
 assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shard again" );
-assert( a.findOne( { num : 1 } ) )
-assert( b.findOne( { num : 1 } ) )
+assert( a.findOne( { num : 1 } ) , "post move 1" )
+assert( b.findOne( { num : 1 } ) , "post move 2" )
 
 print( "*** drop" );
 
@@ -127,4 +135,32 @@ s.printShardingStatus();
 s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
 assert.eq( 0 , doCounts( "after dropDatabase called" ) )
 
+// ---- retry commands SERVER-1471 ----
+
+s.adminCommand( { enablesharding : "test2" } );
+s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
+a = s.getDB( "test2" ).foo;
+b = s2.getDB( "test2" ).foo;
+a.save( { num : 1 } );
+a.save( { num : 2 } );
+a.save( { num : 3 } );
+
+
+assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
+assert.eq( 3 , a.count() , "Ba" );
+assert.eq( 3 , b.count() , "Bb" );
+
+s.adminCommand( { split : "test2.foo" , middle : { num : 2 } } );
+s.adminCommand( { movechunk : "test2.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test2" ) ).name } );
+
+assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
+
+x = a.stats()
+printjson( x )
+y = b.stats()
+printjson( y )
+
+
+
+
 s.stop();
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index e15d74c..70c5ed7 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -1,12 +1,30 @@
 // shard6.js
 
-s = new ShardingTest( "shard6" , 2 , 0 , 1 );
+summary = "";
+
+s = new ShardingTest( "shard6" , 2 , 0 , 2 );
 
 s.adminCommand( { enablesharding : "test" } );
 s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
 
 db = s.getDB( "test" );
 
+function poolStats( where ){
+    var total = 0;
+    var msg = "poolStats " + where + " ";
+    var x = db.runCommand( "connPoolStats" ).hosts
+    for ( var h in x ){
+        var z = x[h];
+        msg += z.created + " ";
+        total += z.created
+    }
+    print( "****\n" + msg + "\n*****" )
+    summary += msg + "\n";
+    return total
+}
+
+poolStats( "at start" )
+
 // we want a lot of data, so lets make a 50k string to cheat :)
 bigString = "";
 while ( bigString.length < 50000 )
@@ -18,22 +36,71 @@ for ( ; num<100; num++ ){
     db.data.save( { num : num , bigString : bigString } );
 }
 
-assert.eq( 100 , db.data.find().toArray().length );
+assert.eq( 100 , db.data.find().toArray().length , "basic find after setup" );
+
+connBefore = poolStats( "setup done" )
 
 // limit
 
assert.eq( 77 , db.data.find().limit(77).itcount() , "limit test 1" );
 assert.eq( 1 , db.data.find().limit(1).itcount() , "limit test 2" );
 for ( var i=1; i<10; i++ ){
-    assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3 : " + i );
+    assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3a : " + i );
+    assert.eq( i , db.data.find().skip(i).limit(i).itcount() , "limit test 3b : " + i );
+    poolStats( "after loop : " + i );
 }
 
+assert.eq( connBefore , poolStats( "limit test done" ) , "limit test conns" );
+
+function assertOrder( start , num ){
+    var a = db.data.find().skip(start).limit(num).sort( { num : 1 } ).map( function(z){ return z.num; } );
+    var c = []
+    for ( var i=0; i<num; i++ )
+        c.push( start + i );
+    assert.eq( c , a , "assertOrder start: " + start + " num: " + num );
+}
+
+assertOrder( 0 , 10 );
+assertOrder( 5 , 10 );
+
+poolStats( "after checking order" )
+
+function doItCount( skip , sort , batchSize ){
+    var c = db.data.find();
+    if ( skip )
+        c.skip( skip )
+    if ( sort )
+        c.sort( sort );
+    if ( batchSize )
+        c.batchSize( batchSize )
+    return c.itcount();
+
+}
+
+function checkItCount( batchSize ){
+    assert.eq( 5 , doItCount( num - 5 , null , batchSize ) , "skip 1 " + batchSize );
+    assert.eq( 5 , doItCount( num - 5 , { num : 1 } , batchSize ) , "skip 2 " + batchSize );
+    assert.eq( 5 , doItCount( num - 5 , { _id : 1 } , batchSize ) , "skip 3 " + batchSize );
+    assert.eq( 0 , doItCount( num + 5 , { num : 1 } , batchSize ) , "skip 4 " + batchSize );
+    assert.eq( 0 , doItCount( num + 5 , { _id : 1 } , batchSize ) , "skip 5 " + batchSize );
+}
+
+poolStats( "before checking itcount" )
+
+checkItCount( 0 )
+checkItCount( 2 )
+
+poolStats( "after checking itcount" )
 
 // --- test save support ---
 
 o = db.data.findOne();
 o.x = 16;
 db.data.save( o );
-assert.eq( 16 , db.data.findOne( { _id : o._id } ).x , "x1 - did save fail?" );
+o = db.data.findOne( { _id : o._id } )
+assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
+
+poolStats( "at end" )
+
+print( summary )
 
 s.stop();
getSorted( "x" , "num" , 1 ) , "D3" ) +assert.eq( forward , getSorted( "x" , "num" , -1 ) , "D4" ) + +assert.eq( backward , getSorted( "x" , "num" , 1 , { num : 1 } ) , "D5" ) +assert.eq( forward , getSorted( "x" , "num" , -1 , { num : 1 } ) , "D6" ) + + +s.stop(); diff --git a/jstests/sharding/splitpick.js b/jstests/sharding/splitpick.js index ad27645..3733906 100644 --- a/jstests/sharding/splitpick.js +++ b/jstests/sharding/splitpick.js @@ -17,17 +17,23 @@ for ( var i=1; i<20; i++ ){ c.save( { a : i } ); } c.save( { a : 99 } ); +db.getLastError(); -assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 1 } } ).middle.a , 1 , "splitvalue 1" ); -assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 1 , "splitvalue 2" ); +function checkSplit( f, want , num ){ + x = s.admin.runCommand( { splitvalue : "test.foo" , find : { a : f } } ); + assert.eq( want, x.middle ? x.middle.a : null , "splitvalue " + num + " " + tojson( x ) ); +} + +checkSplit( 1 , 1 , "1" ) +checkSplit( 3 , 1 , "2" ) s.adminCommand( { split : "test.foo" , find : { a : 1 } } ); -assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 99 , "splitvalue 3" ); +checkSplit( 3 , 99 , "3" ) s.adminCommand( { split : "test.foo" , find : { a : 99 } } ); assert.eq( s.config.chunks.count() , 3 ); s.printChunks(); -assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 50 } } ).middle.a , 10 , "splitvalue 4 " ); +checkSplit( 50 , 10 , "4" ) s.stop(); diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js new file mode 100644 index 0000000..c75d208 --- /dev/null +++ b/jstests/sharding/stats.js @@ -0,0 +1,60 @@ +s = new ShardingTest( "stats" , 2 , 1 , 1 ); +s.adminCommand( { enablesharding : "test" } ); + +a = s._connections[0].getDB( "test" ); +b = s._connections[1].getDB( "test" ); + +db = s.getDB( "test" ); + +function numKeys(o){ + var num = 0; + for (var x in o) + num++; + return num; +} + +// ---------- load some data ----- + +// need collections sharded before and after main collection for proper test +s.adminCommand( { shardcollection : "test.aaa" , key : { _id : 1 } } ); +s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); // this collection is actually used +s.adminCommand( { shardcollection : "test.zzz" , key : { _id : 1 } } ); + + +N = 10000; +s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } ) +s.adminCommand( { moveChunk : "test.foo", find : { _id : 3 } ,to : s.getNonPrimaries( "test" )[0] } ) + +for ( i=0; i<N; i++ ) + db.foo.insert( { _id : i } ) +db.getLastError(); + +x = db.foo.stats(); +assert.eq( N , x.count , "coll total count expected" ) +assert.eq( db.foo.count() , x.count , "coll total count match" ) +assert.eq( 2 , x.nchunks , "coll chunk num" ) +assert.eq( 2 , numKeys(x.shards) , "coll shard num" ) +assert.eq( N / 2 , x.shards.shard0000.count , "coll count on shard0000 expected" ) +assert.eq( N / 2 , x.shards.shard0001.count , "coll count on shard0001 expected" ) +assert.eq( a.foo.count() , x.shards.shard0000.count , "coll count on shard0000 match" ) +assert.eq( b.foo.count() , x.shards.shard0001.count , "coll count on shard0001 match" ) + + +a_extras = a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes +b_extras = b.stats().objects - b.foo.count(); // things like system.namespaces and system.indexes +print("a_extras: " + a_extras); +print("b_extras: " + b_extras); + +x = db.stats(); + +//dbstats uses Future::CommandResult so raw 
output uses connection strings not shard names +shards = Object.keySet(x.raw); + +assert.eq( N + (a_extras + b_extras) , x.objects , "db total count expected" ) +assert.eq( 2 , numKeys(x.raw) , "db shard num" ) +assert.eq( (N / 2) + a_extras, x.raw[shards[0]].objects , "db count on shard0000 expected" ) +assert.eq( (N / 2) + b_extras, x.raw[shards[1]].objects , "db count on shard0001 expected" ) +assert.eq( a.stats().objects , x.raw[shards[0]].objects , "db count on shard0000 match" ) +assert.eq( b.stats().objects , x.raw[shards[1]].objects , "db count on shard0001 match" ) + +s.stop() diff --git a/jstests/sharding/sync1.js b/jstests/sharding/sync1.js index 905b488..e649387 100644 --- a/jstests/sharding/sync1.js +++ b/jstests/sharding/sync1.js @@ -18,4 +18,9 @@ assert.eq( 2 , t.find().itcount() , "B2" ); test.tempStart(); test.checkHashes( "test" , "B3" ); + +assert.eq( 2 , t.find().itcount() , "C1" ); +t.remove( { x : 1 } ) +assert.eq( 1 , t.find().itcount() , "C2" ); + test.stop(); diff --git a/jstests/sharding/sync2.js b/jstests/sharding/sync2.js index b0bbcb6..c249d11 100644 --- a/jstests/sharding/sync2.js +++ b/jstests/sharding/sync2.js @@ -7,13 +7,13 @@ s2 = s._mongos[1]; s.adminCommand( { enablesharding : "test" } ); s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } ); -s.getDB( "test" ).foo.save( { num : 1 } ); -s.getDB( "test" ).foo.save( { num : 2 } ); -s.getDB( "test" ).foo.save( { num : 3 } ); -s.getDB( "test" ).foo.save( { num : 4 } ); -s.getDB( "test" ).foo.save( { num : 5 } ); -s.getDB( "test" ).foo.save( { num : 6 } ); -s.getDB( "test" ).foo.save( { num : 7 } ); +s.getDB( "test" ).foo.insert( { num : 1 } ); +s.getDB( "test" ).foo.insert( { num : 2 } ); +s.getDB( "test" ).foo.insert( { num : 3 } ); +s.getDB( "test" ).foo.insert( { num : 4 } ); +s.getDB( "test" ).foo.insert( { num : 5 } ); +s.getDB( "test" ).foo.insert( { num : 6 } ); +s.getDB( "test" ).foo.insert( { num : 7 } ); assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" ); assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" ); @@ -21,10 +21,10 @@ assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" ); s.adminCommand( { split : "test.foo" , middle : { num : 4 } } ); s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getFirstOther( s.getServer( "test" ) ).name } ); -assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" ); -assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" ); +assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "shard 0 request" ); +assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "shard 1 request" ); assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length + - s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" ); + s._connections[1].getDB( "test" ).foo.find().toArray().length , "combined shards" ); assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" ); assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" ); @@ -45,4 +45,54 @@ for ( var i=0; i<10; i++ ){ assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B " + i ); } +assert.eq( 0 , s.config.big.find().itcount() , "C1" ); +for ( i=0; i<50; i++ ){ + s.config.big.insert( { _id : i } ); +} +s.config.getLastError(); +assert.eq( 50 , s.config.big.find().itcount() , "C2" ); +assert.eq( 50 , s.config.big.find().count() , "C3" ); 
+assert.eq( 50 , s.config.big.find().batchSize(5).itcount() , "C4" );
+
+
+hashes = []
+
+for ( i=0; i<3; i++ ){
+    print( i );
+    s._connections[i].getDB( "config" ).chunks.find( {} , { lastmod : 1 } ).forEach( printjsononeline );
+    hashes[i] = s._connections[i].getDB( "config" ).runCommand( "dbhash" );
+}
+
+printjson( hashes );
+
+for ( i=1; i<hashes.length; i++ ){
+    if ( hashes[0].md5 == hashes[i].md5 )
+        continue;
+
+    assert.eq( hashes[0].numCollections , hashes[i].numCollections , "num collections" );
+
+    var bad = false;
+
+    for ( var k in hashes[0].collections ){
+        if ( hashes[0].collections[k] ==
+             hashes[i].collections[k] )
+            continue;
+
+        if ( k == "mongos" || k == "changelog" || k == "locks" )
+            continue;
+
+        bad = true;
+        print( "collection " + k + " is different" );
+
+        print( "----" );
+        s._connections[0].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
+        print( "----" );
+        s._connections[i].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
+        print( "----" );
+    }
+
+    if ( bad )
+        throw "hashes different";
+}
+
 s.stop();
diff --git a/jstests/sharding/sync3.js b/jstests/sharding/sync3.js new file mode 100644 index 0000000..3737419 --- /dev/null +++ b/jstests/sharding/sync3.js @@ -0,0 +1,10 @@
+
+test = new SyncCCTest( "sync3" , { logpath : "/dev/null" } )
+
+x = test._connections[0].getDB( "admin" ).runCommand( { "_testDistLockWithSyncCluster" : 1 , host : test.url } )
+printjson( x )
+assert( x.ok );
+
+
+
+test.stop();
diff --git a/jstests/sharding/sync4.js b/jstests/sharding/sync4.js new file mode 100644 index 0000000..6733f07 --- /dev/null +++ b/jstests/sharding/sync4.js @@ -0,0 +1,19 @@
+
+test = new SyncCCTest( "sync4" )
+
+db = test.conn.getDB( "test" )
+t = db.sync4
+
+for ( i=0; i<1000; i++ ){
+    t.insert( { _id : i , x : "asdasdsdasdas" } )
+}
+db.getLastError();
+
+test.checkHashes( "test" , "A0" );
+assert.eq( 1000 , t.find().count() , "A1" )
+assert.eq( 1000 , t.find().itcount() , "A2" )
+assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A3" )
+
+
+
+test.stop();
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js index 82c3d8a..63d4bf6 100644 --- a/jstests/sharding/update1.js +++ b/jstests/sharding/update1.js @@ -8,20 +8,24 @@ coll = db.update1;
 coll.insert({_id:1, key:1});
-// these are upserts
+// these are both upserts
 coll.save({_id:2, key:2});
-coll.save({_id:3, key:3});
+coll.update({_id:3, key:3}, {$set: {foo: 'bar'}}, {upsert: true});
 assert.eq(coll.count(), 3, "count A")
+assert.eq(coll.findOne({_id:3}).key, 3 , "findOne 3 key A")
+assert.eq(coll.findOne({_id:3}).foo, 'bar' , "findOne 3 foo A")
 // update existing using save()
 coll.save({_id:1, key:1, other:1});
 // update existing using update()
 coll.update({_id:2}, {key:2, other:2});
-//coll.update({_id:3, key:3}, {other:3}); //should add key to new object (doesn't work yet)
 coll.update({_id:3}, {key:3, other:3});
+coll.update({_id:3, key:3}, {other:4});
+assert.eq(db.getLastErrorObj().code, 12376, 'bad update error');
+
 assert.eq(coll.count(), 3, "count B")
 coll.find().forEach(function(x){
     assert.eq(x._id, x.key, "_id == key");
@@ -29,5 +33,14 @@ coll.find().forEach(function(x){
 });
+coll.update({_id:1, key:1}, {$set: {key:2}});
+err = db.getLastErrorObj();
+assert.eq(coll.findOne({_id:1}).key, 1, 'key unchanged');
+assert.eq(err.code, 13123, 'key error code 1');
+assert.eq(err.code, 13123, 'key error code 2');
+
+coll.update({_id:1, key:1}, {$set: {foo:2}});
+assert.isnull(db.getLastError(), 'getLastError reset');
+
 s.stop()
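A note on the stats.js diff above: because dbstats is fanned out to the shards via Future::CommandResult, the per-shard results in db.stats().raw are keyed by host:port connection strings rather than shard names like shard0000. A minimal shell sketch of reading those raw entries, assuming a connection to a mongos; the helper name shardObjectCounts is hypothetical:

function shardObjectCounts( db ){
    var x = db.stats();
    var counts = {};
    for ( var host in x.raw )  // keys look like "localhost:30000", not "shard0000"
        counts[host] = x.raw[host].objects;
    return counts;
}

printjson( shardObjectCounts( db.getSisterDB( "test" ) ) );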
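The config-server consistency loop added to sync2.js can be distilled into a reusable predicate. A minimal sketch under the same assumptions as the test (an array of connections to the sync-cluster config servers, e.g. SyncCCTest's _connections); the function name configServersConsistent is hypothetical:

function configServersConsistent( conns ){
    // collections expected to differ legitimately between config servers
    var ignore = { mongos : 1 , changelog : 1 , locks : 1 };
    var hashes = [];
    for ( var i = 0; i < conns.length; i++ )
        hashes[i] = conns[i].getDB( "config" ).runCommand( "dbhash" );
    for ( i = 1; i < hashes.length; i++ ){
        if ( hashes[0].md5 == hashes[i].md5 )
            continue;  // whole-db hash matches; nothing more to compare
        for ( var k in hashes[0].collections ){
            if ( ignore[k] )
                continue;
            if ( hashes[0].collections[k] != hashes[i].collections[k] )
                return false;  // a replicated config collection diverged
        }
    }
    return true;
}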
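Finally, the update1.js changes exercise two shard-key rules: an update may not change a document's shard key (the 13123 assertions), and a replacement-style update must include the shard key so it can be routed (the 12376 assertion). A minimal shell sketch against a mongos, assuming test.update1 is sharded on { key : 1 } as in the test; the helper name expectUpdateError is hypothetical:

function expectUpdateError( coll , query , mod , wantCode , msg ){
    coll.update( query , mod );
    var err = coll.getDB().getLastErrorObj();
    assert.eq( wantCode , err.code , msg + " " + tojson( err ) );
}

// changing the shard key of an existing document is rejected
expectUpdateError( db.update1 , { _id : 1 , key : 1 } , { $set : { key : 2 } } , 13123 , "shard key change" );

// a replacement document that omits the shard key is rejected
expectUpdateError( db.update1 , { _id : 3 , key : 3 } , { other : 4 } , 12376 , "missing shard key" );

// updates that touch only non-key fields succeed and clear getLastError
db.update1.update( { _id : 1 , key : 1 } , { $set : { foo : 2 } } );
assert.isnull( db.getLastError() , "non-key update ok" );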