author    Antonin Kral <a.kral@bobek.cz>    2012-06-05 19:50:36 +0200
committer Antonin Kral <a.kral@bobek.cz>    2012-06-05 19:50:36 +0200
commit    291c9687fb2307dc22d1f269eb4d0aa98fe8cadc (patch)
tree      f46fac7bf8530d100aa55da89dfaa53490fbb350 /jstests
parent    6d0f215499dda50fdba4a6f60ab359efe0054e0d (diff)
parent    3703a282eca7e79e91f4bd651b1b861b76dc6c68 (diff)
download  mongodb-291c9687fb2307dc22d1f269eb4d0aa98fe8cadc.tar.gz
Merge tag 'upstream/2.0.6'
Upstream version 2.0.6
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/queryoptimizer7.js                   |  24
-rw-r--r--  jstests/queryoptimizera.js                   |  87
-rw-r--r--  jstests/replsets/replset7.js                 |  46
-rw-r--r--  jstests/replsets/slavedelay3.js              |  38
-rw-r--r--  jstests/slowNightly/sharding_passthrough.js  |   2
5 files changed, 196 insertions(+), 1 deletion(-)
diff --git a/jstests/queryoptimizer7.js b/jstests/queryoptimizer7.js
new file mode 100644
index 0000000..c4ff75b
--- /dev/null
+++ b/jstests/queryoptimizer7.js
@@ -0,0 +1,24 @@
+// Test query retry with a query that is non-multikey-unusable and unsatisfiable. SERVER-5581
+
+t = db.jstests_queryoptimizer7;
+t.drop();
+
+t.ensureIndex( { a:1 } );
+t.ensureIndex( { b:1 } );
+
+for( i = 0; i < 25; ++i ) {
+ t.save( { a:0, b:'' } ); // a:0 documents have small b strings.
+}
+big = new Array( 1000000 ).toString();
+for( i = 0; i < 50; ++i ) {
+ t.save( { a:[1,3], b:big } ); // a:[1,3] documents have very large b strings.
+}
+
+// Record the a:1 index for the query pattern for { a: { $lt:1 } }, { b:1 }.
+assert.eq( 'BtreeCursor a_1', t.find( { a:{ $lt:1 } } ).sort( { b:1 } ).explain().cursor );
+
+// The multikey query pattern for this query will match that of the previous query.
+// The a:1 index will be retried for this query but will fail because an in-memory sort must
+// be performed on a larger data set. Because the query { a:{ $lt:2, $gt:2 } } is
+// unsatisfiable, no attempt will be made to clear its query pattern.
+assert.lt( -1, t.find( { a:{ $lt:2, $gt:2 } } ).sort( { b:1 } ).itcount() );
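The comment block above captures the reasoning: the plan recorded for one query pattern is retried by a later query whose pattern matches. As a rough illustration (not part of the commit), the recorded plan choice for the first query shape can be inspected from a 2.0-era mongo shell with explain():

    // Sketch: inspect which cursor the optimizer picked for this query shape.
    // Reuses the test's collection; nothing here is new server functionality.
    var c = db.jstests_queryoptimizer7;
    print( c.find( { a: { $lt: 1 } } ).sort( { b: 1 } ).explain().cursor ); // e.g. "BtreeCursor a_1"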
diff --git a/jstests/queryoptimizera.js b/jstests/queryoptimizera.js
new file mode 100644
index 0000000..48d7ccf
--- /dev/null
+++ b/jstests/queryoptimizera.js
@@ -0,0 +1,87 @@
+// Check that a warning message about doing a capped collection scan for a query with an _id
+// constraint is printed at appropriate times. SERVER-5353
+
+function numWarnings() {
+ logs = db.adminCommand( { getLog:"global" } ).log;
+ ret = 0;
+ logs.forEach( function( x ) {
+ if ( x.match( warningMatchRegexp ) ) {
+ ++ret;
+ }
+ } );
+ return ret;
+}
+
+collectionNameIndex = 0;
+
+// Generate a collection name not already present in the log.
+do {
+ testCollectionName = 'jstests_queryoptimizera__' + collectionNameIndex++;
+ warningMatchString = 'unindexed _id query on capped collection.*collection: test.' +
+ testCollectionName;
+ warningMatchRegexp = new RegExp( warningMatchString );
+
+} while( numWarnings() > 0 );
+
+t = db[ testCollectionName ];
+t.drop();
+
+notCappedCollectionName = testCollectionName + '_notCapped';
+
+notCapped = db[ notCappedCollectionName ];
+notCapped.drop();
+
+db.createCollection( testCollectionName, { capped:true, size:1000 } );
+db.createCollection( notCappedCollectionName, { autoIndexId:false } );
+
+t.insert( {} );
+notCapped.insert( {} );
+
+oldNumWarnings = 0;
+
+function assertNoNewWarnings() {
+ assert.eq( oldNumWarnings, numWarnings() );
+}
+
+function assertNewWarning() {
+ ++oldNumWarnings;
+ assert.eq( oldNumWarnings, numWarnings() );
+}
+
+// Simple _id query without an _id index.
+t.find( { _id:0 } ).itcount();
+assertNewWarning();
+
+// Simple _id query without an _id index, on a non-capped collection.
+notCapped.find( { _id:0 } ).itcount();
+assertNoNewWarnings();
+
+// A multi-field query, including _id.
+t.find( { _id:0, a:0 } ).itcount();
+assertNewWarning();
+
+// An unsatisfiable query.
+t.find( { _id:0, a:{$in:[]} } ).itcount();
+assertNoNewWarnings();
+
+// A hinted query.
+t.find( { _id:0 } ).hint( { $natural:1 } ).itcount();
+assertNoNewWarnings();
+
+// Retry a multi-field query.
+t.find( { _id:0, a:0 } ).itcount();
+assertNewWarning();
+
+// Warnings should not be printed when an index is added on _id.
+t.ensureIndex( { _id:1 } );
+
+t.find( { _id:0 } ).itcount();
+assertNoNewWarnings();
+
+t.find( { _id:0, a:0 } ).itcount();
+assertNoNewWarnings();
+
+t.find( { _id:0, a:0 } ).itcount();
+assertNoNewWarnings();
+
+t.drop(); // cleanup
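For reference (not part of the commit), the numWarnings() helper above relies on the getLog admin command, which returns recent in-memory server log lines as an array of strings. The same counting idiom can be run standalone; the match pattern is only an example:

    // Sketch: count recent log lines matching a pattern via getLog.
    var lines = db.adminCommand( { getLog: "global" } ).log;
    var hits = lines.filter( function( l ) {
        return /unindexed _id query on capped collection/.test( l );
    } );
    print( hits.length + ' matching warning(s) in the recent log' );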
diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js
new file mode 100644
index 0000000..f29c1fb
--- /dev/null
+++ b/jstests/replsets/replset7.js
@@ -0,0 +1,46 @@
+
+// Test for SERVER-5040: documents that move forward during an initial sync.
+
+var rt = new ReplSetTest( { name : "replset7tests" , nodes: 1 } );
+
+var nodes = rt.startSet();
+rt.initiate();
+var master = rt.getMaster();
+
+var md = master.getDB( 'd' );
+var mdc = md[ 'c' ];
+
+// prep the data
+var doccount = 100000;
+for( i = 0; i < doccount; ++i ) {
+ mdc.insert( { _id:i, x:i } );
+}
+md.getLastError();
+
+mdc.ensureIndex( { x : 1 }, { unique: true } );
+md.getLastError();
+
+// add a secondary
+var slave = rt.add();
+rt.reInitiate();
+print("initiation complete!");
+var sc = slave.getDB( 'd' )[ 'c' ];
+slave.setSlaveOk();
+
+// Wait for slave to start cloning.
+//assert.soon( function() { c = sc.find( { _id:1, x:1 } ); print( c ); return c > 0; } );
+
+
+// Move every document to the end of the collection by removing it and reinserting a grown copy.
+for (i = 0; i < doccount; ++i) {
+ mdc.remove( { _id:i, x:i } );
+ mdc.insert( { _id:doccount+i, x:i, bigstring: "ayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayay" } );
+ md.getLastError();
+}
+
+// Wait for replication to catch up.
+rt.awaitSecondaryNodes();
+
+// Do we have an index?
+assert.eq (1, slave.getDB( 'd' )['system.indexes']
+ .find({"v" : 1,"key" : {"x" : 1},"unique" : true,"ns" : "d.c","name" : "x_1"}).count());
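An aside, not a change to the test: the final assertion runs once after awaitSecondaryNodes(), so a hypothetical more defensive variant would poll until the unique index appears on the secondary:

    // Sketch: poll for the unique x_1 index on the secondary instead of
    // asserting immediately after replication catch-up.
    assert.soon( function() {
        return slave.getDB( 'd' )[ 'system.indexes' ]
            .find( { key: { x: 1 }, ns: 'd.c', unique: true } ).count() == 1;
    } );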
diff --git a/jstests/replsets/slavedelay3.js b/jstests/replsets/slavedelay3.js
new file mode 100644
index 0000000..e89fe96
--- /dev/null
+++ b/jstests/replsets/slavedelay3.js
@@ -0,0 +1,38 @@
+load("jstests/replsets/rslib.js");
+
+var name = 'slavedelay3';
+var replTest = new ReplSetTest({ name: name, nodes: 3 });
+var nodes = replTest.startSet();
+var config = replTest.getReplSetConfig();
+// ensure member 0 is primary
+config.members[0].priority = 2;
+config.members[1].priority = 0;
+config.members[1].slaveDelay = 5;
+
+replTest.initiate(config);
+replTest.awaitReplication();
+replTest.bridge();
+
+
+var master = replTest.getMaster().getDB(name);
+var slaveConns = replTest.liveNodes.slaves;
+var slave = [];
+for (var i in slaveConns) {
+    var d = slaveConns[i].getDB(name);
+    d.getMongo().setSlaveOk();
+    slave.push(d);
+}
+
+waitForAllMembers(master);
+
+
+
+replTest.partition(0,2);
+replTest.awaitReplication();
+
+master.foo.insert({x:1});
+
+// make sure the record still appears in the remote slave
+assert.soon( function() { return slave[1].foo.findOne() != null; } );
+
+replTest.stopSet();
\ No newline at end of file
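For context (not part of the commit): slaveDelay is a per-member replica set config setting, in seconds, so member 1 above applies writes roughly 5 seconds late. A hypothetical sanity check could read the active config back from the primary:

    // Sketch: confirm the configured delay from the stored replset config.
    var cfg = master.getSisterDB( 'local' ).system.replset.findOne();
    print( 'member 1 slaveDelay: ' + cfg.members[1].slaveDelay + 's' );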
diff --git a/jstests/slowNightly/sharding_passthrough.js b/jstests/slowNightly/sharding_passthrough.js
index d81df68..fa1f7bc 100644
--- a/jstests/slowNightly/sharding_passthrough.js
+++ b/jstests/slowNightly/sharding_passthrough.js
@@ -72,7 +72,7 @@ files.forEach(
return;
}
// These aren't supposed to get run under sharding:
- if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|compact.*|check_shard_index|bench_test.*|mr_replaceIntoDB)\.js$/.test(x.name)) {
+ if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|compact.*|check_shard_index|bench_test.*|mr_replaceIntoDB|queryoptimizera)\.js$/.test(x.name)) {
print(" >>>>>>>>>>>>>>> skipping test that would fail under sharding " + x.name)
return;
}
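The one-line change above adds queryoptimizera to the passthrough's skip list, since that new test inspects the global server log and is not expected to pass under sharding. For illustration (not part of the commit), the updated filter can be checked against a path directly:

    // Sketch: verify the updated skip regex matches the new test file.
    var skip = /[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|compact.*|check_shard_index|bench_test.*|mr_replaceIntoDB|queryoptimizera)\.js$/;
    print( skip.test( 'jstests/queryoptimizera.js' ) ); // true -> skipped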