path: root/jstests/slowNightly/sharding_balance2.js
// sharding_balance2.js
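//
// Tests that the balancer respects a shard's maxSize limit: the chunk distribution
// starts out heavily skewed, and the balancer should never migrate chunks onto the
// shard that was added with an artificially low maxSize.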

s = new ShardingTest( "slow_sharding_balance2" , 2 , 1 , 1 , { chunksize : 1 , manualAddShard : true } )

names = s.getConnNames();
for ( var i=0; i<names.length; i++ ){
    if ( i==1 ) {
        // Add this shard with an artificially low maxSize (specified in MB). That mongod
        // still allocates and mmaps storage as usual, but the balancer will not migrate
        // any chunks to it once its mapped data size exceeds maxSize.
        s.adminCommand( { addshard : names[i] , maxSize : 1 } );
    } else {
        s.adminCommand( { addshard : names[i] } );
    }
}
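
// Optional sanity check: the maxSize limit is recorded on the shard's document in
// config.shards, so printing those documents confirms the addshard options took effect.
s.config.shards.find().forEach( printjson );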

s.adminCommand( { enablesharding : "test" } );

s.config.settings.find().forEach( printjson )

db = s.getDB( "test" );

// Build a ~10KB filler string, then insert roughly 40MB of documents so that, with a
// 1MB chunk size, the collection will split into well over 20 chunks.
bigString = "";
while ( bigString.length < 10000 )
    bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";

inserted = 0;
num = 0;
while ( inserted < ( 40 * 1024 * 1024 ) ){
    db.foo.insert( { _id : num++ , s : bigString } );
    inserted += bigString.length;
}

db.getLastError(); // wait for the inserts to be acknowledged before sharding
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( 20 , s.config.chunks.count() , "setup2" );

// Returns the gap in chunk counts between the two shards. With migrations to the
// maxSize-limited shard blocked, this imbalance should never shrink.
function diff(){
    var x = s.chunkCounts( "foo" );
    printjson( x );
    return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
}
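// For reference, s.chunkCounts( "foo" ) returns an object keyed by shard id, e.g.
// (hypothetical numbers) { shard0000 : 25 , shard0001 : 0 }, so diff() is simply the
// difference between the two counts.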

// All chunks initially live on the collection's primary shard, so the distribution
// starts out heavily skewed.
assert.lt( 10 , diff() );
print( diff() );

// Watch the chunk distribution for 30 seconds, checking every 5 seconds. Because the
// second shard is over its maxSize, the balancer should never migrate a chunk, so
// diff() should never change from its initial value.
var currDiff = diff();
assert.repeat( function(){
    var d = diff();
    return d != currDiff;
} , "balance with maxSize should not have happened" , 1000 * 30 , 5000 );

s.stop();