Diffstat (limited to 'jstests/dur')
-rwxr-xr-x | jstests/dur/a_quick.js     | 123
-rw-r--r-- | jstests/dur/closeall.js    |  80
-rw-r--r-- | jstests/dur/diskfull.js    | 136
-rw-r--r-- | jstests/dur/dropdb.js      | 163
-rwxr-xr-x | jstests/dur/dur1.js        | 154
-rw-r--r-- | jstests/dur/dur2.js        |  92
-rwxr-xr-x | jstests/dur/lsn.js         | 126
-rwxr-xr-x | jstests/dur/manyRestart.js | 191
-rw-r--r-- | jstests/dur/md5.js         | 101
-rwxr-xr-x | jstests/dur/oplog.js       | 159
10 files changed, 1325 insertions, 0 deletions
diff --git a/jstests/dur/a_quick.js b/jstests/dur/a_quick.js
new file mode 100755
index 0000000..f703f3f
--- /dev/null
+++ b/jstests/dur/a_quick.js
@@ -0,0 +1,123 @@
+/* quick.js
+   test durability
+   this file should always run quickly
+   other tests can be slow
+*/
+
+testname = "a_quick";
+load("jstests/_tst.js");
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+    }
+}
+
+// directories
+var path1 = "/data/db/quicknodur";
+var path2 = "/data/db/quickdur";
+
+// non-durable version
+tst.log("start mongod without dur");
+var conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur");
+tst.log("without dur work");
+var d = conn.getDB("test");
+d.foo.insert({ _id:123 });
+d.getLastError();
+tst.log("stop without dur");
+stopMongod(30000);
+
+// durable version
+tst.log("start mongod with dur");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--durOptions", 8);
+tst.log("with dur work");
+d = conn.getDB("test");
+d.foo.insert({ _id: 123 });
+d.getLastError(); // wait
+
+// we could actually do getlasterror fsync:1 now, but maybe this is good as it will assure
+// that commits happen on a timely basis. a bunch of the other dur/*.js tests use fsync.
+tst.log("sleep a bit for a group commit");
+sleep(8000);
+
+// kill the process hard
+tst.log("kill -9 mongod");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// we will force removal of a datafile to be sure we can recreate everything
+// without it being present.
+removeFile(path2 + "/test.0");
+
+// for that to work, we can't skip anything though:
+removeFile(path2 + "/journal/lsn"); + +// with the file deleted, we MUST start from the beginning of the journal. +// thus this check to be careful +var files = listFiles(path2 + "/journal/"); +if (files.some(function (f) { return f.name.indexOf("lsn") >= 0; })) { + print("\n\n\n"); + print(path2); + printjson(files); + assert(false, "a journal/lsn file is present which will make this test potentially fail."); +} + +// restart and recover +tst.log("restart and recover"); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--durOptions", 9); +tst.log("check data results");
+d = conn.getDB("test");
+
+var countOk = (d.foo.count() == 1);
+if (!countOk) {
+ print("\n\n\na_quick.js FAIL count " + d.foo.count() + " is wrong\n\n\n"); + // keep going - want to see if the diff matches. if so the sleep() above was too short? +} + +tst.log("stop"); +stopMongod(30002); + +// at this point, after clean shutdown, there should be no journal files +tst.log("check no journal files"); +checkNoJournalFiles(path2 + "/journal"); + +tst.log("check data matches"); +var diff = tst.diff(path1 + "/test.ns", path2 + "/test.ns"); +print("diff of .ns files returns:" + diff); + +function showfiles() { + print("\n\nERROR: files for dur and nodur do not match"); + print(path1 + " files:"); + printjson(listFiles(path1)); + print(path2 + " files:"); + printjson(listFiles(path2)); + print(); +} + +if (diff != "") { + showfiles(); + assert(diff == "", "error test.ns files differ"); +} + +diff = tst.diff(path1 + "/test.0", path2 + "/test.0"); +print("diff of .0 files returns:" + diff); +if (diff != "") { + showfiles(); + assert(diff == "", "error test.0 files differ"); +}
+
+assert(countOk, "a_quick.js document count after recovery was not the expected value");
+
+tst.success();
diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js
new file mode 100644
index 0000000..f169f06
--- /dev/null
+++ b/jstests/dur/closeall.js
@@ -0,0 +1,80 @@
+// testing closealldatabases concurrency
+// this is also a test of recoverFromYield() as that will get exercised by the update
+
+function f() {
+    var variant = (new Date()) % 4;
+    var path = "/data/db/closeall";
+    var path2 = "/data/db/closeall_slave";
+    var ourdb = "closealltest";
+
+    print("closeall.js start mongod variant:" + variant);
+    var options = (new Date()-0)%2==0 ? 8 : 0;
+    print("closeall.js --durOptions " + options);
+    var N = 1000;
+    if (options)
+        N = 300;
+
+    // use replication to exercise that code too with a close, and also to test local.sources with a close
+    var conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--durOptions", options, "--master", "--oplogSize", 64);
+    var connSlave = startMongodEmpty("--port", 30002, "--dbpath", path2, "--dur", "--durOptions", options, "--slave", "--source", "localhost:30001");
+
+    var slave = connSlave.getDB(ourdb);
+
+    // we'll use two connections to make a little parallelism
+    var db1 = conn.getDB(ourdb);
+    var db2 = new Mongo(db1.getMongo().host).getDB(ourdb);
+
+    print("closeall.js run test");
+
+    for( var i = 0; i < N; i++ ) {
+        db1.foo.insert({x:1}); // this does not wait for a return code, so we get some parallelism
+        if( i % 7 == 0 )
+            db1.foo.insert({x:99, y:2});
+        if( i % 49 == 0 )
+            db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 });
+        if (i % 100 == 0)
+            db1.foo.find();
+        if( i == 800 )
+            db1.foo.ensureIndex({ x: 1 });
+        var res = null;
+        try {
+            if( variant == 1 )
+                sleep(0);
+            else if( variant == 2 )
+                sleep(1);
+            else if( variant == 3 && i % 10 == 0 )
+                print(i);
+            res = db2.adminCommand("closeAllDatabases");
+ }
+ catch (e) {
+ sleep(5000); // sleeping a little makes console output order prettier
+ print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
+ try {
+ print("getlasterror:");
+ printjson(db2.getLastErrorObj());
+ print("trying one more closealldatabases:");
+ res = db2.adminCommand("closeAllDatabases");
+ printjson(res);
+ }
+ catch (e) {
+ print("got another exception : " + e);
+ }
+ print("\n\n\n");
+ // sleep a little to capture possible mongod output?
+ sleep(2000);
+ throw e;
+ }
+ assert( res.ok, "closeAllDatabases res.ok=false");
+ }
+
+ print("closeall.js end test loop. slave.foo.count:");
+ print(slave.foo.count());
+
+ print("closeall.js shutting down servers");
+ stopMongod(30002);
+ stopMongod(30001);
+}
+
+f();
+sleep(500);
+print("SUCCESS closeall.js");
diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js
new file mode 100644
index 0000000..da45c20
--- /dev/null
+++ b/jstests/dur/diskfull.js
@@ -0,0 +1,136 @@
+/** Test running out of disk space with durability enabled */
+
+startPath = "/data/db/diskfulltest";
+recoverPath = "/data/db/dur_diskfull";
+
+doIt = false;
+files = listFiles( "/data/db" );
+for ( i in files ) {
+    if ( files[ i ].name == startPath ) {
+        doIt = true;
+    }
+}
+
+if ( !doIt ) {
+    print( "path " + startPath + " missing, skipping diskfull test" );
+    doIt = false;
+}
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
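+// (note: the check above treats any file whose name does not contain "prealloc" -- e.g. j._0 or lsn --
+//  as an unexpected leftover)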
+ +/** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */ +function clear() { + files = listFiles( startPath ); + files.forEach( function( x ) { removeFile( x.name ) } ); +} + +function log(str) { + print(); + if(str) + print(testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +function work() { + log("work"); + try { + var d = conn.getDB("test"); + + big = new Array( 5000 ).toString(); + for( i = 0; i < 10000; ++i ) { + d.foo.insert( { _id:i, b:big } ); + } + + d.getLastError(); + } catch ( e ) { + print( e ); + raise( e ); + } finally { + log("endwork"); + } +} + +function verify() { + log("verify"); + var d = conn.getDB("test"); + c = d.foo.count(); + v = d.foo.validate(); + // not much we can guarantee about the writes, just validate when possible + if ( c != 0 && !v.valid ) { + printjson( v ); + print( c ); + assert( v.valid ); + assert.gt( c, 0 ); + } +} + +function runFirstMongodAndFillDisk() { + log(); + + clear(); + conn = startMongodNoReset("--port", 30001, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc"); + + assert.throws( work, null, "no exception thrown when exceeding disk capacity" ); + waitMongoProgramOnPort( 30001 ); + + // the above wait doesn't work on windows + sleep(5000); +} + +function runSecondMongdAndRecover() { + // restart and recover + log(); + conn = startMongodNoReset("--port", 30003, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc"); + verify(); + + log("stop"); + stopMongod(30003); + + // stopMongod seems to be asynchronous (hmmm) so we sleep here. + sleep(5000); + + // at this point, after clean shutdown, there should be no journal files
+ log("check no journal files");
+ checkNoJournalFiles(startPath + "/journal/");
+
+    log();
+}
+
+function someWritesInJournal() {
+    runFirstMongodAndFillDisk();
+    runSecondMongdAndRecover();
+}
+
+function noWritesInJournal() {
+    // It is too difficult to consistently trigger cases where there are no existing journal files due to lack of disk space, but
+    // if we were to test this case we would need to manually remove the lock file.
+//    removeFile( startPath + "/mongod.lock" );
+}
+
+if ( doIt ) {
+
+    var testname = "dur_diskfull";
+    var step = 1;
+    var conn = null;
+
+    someWritesInJournal();
+    noWritesInJournal();
+
+    print(testname + " SUCCESS");
+
+}
\ No newline at end of file diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js new file mode 100644 index 0000000..7f82cd7 --- /dev/null +++ b/jstests/dur/dropdb.js @@ -0,0 +1,163 @@ +/* durability test dropping a database +*/ + +var debugging = false; +var testname = "dropdb"; +var step = 1; +var conn = null; + +function checkNoJournalFiles(path, pass) { + var files = listFiles(path); + if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) { + if (pass == null) { + // wait a bit longer for mongod to potentially finish if it is still running. + sleep(10000); + return checkNoJournalFiles(path, 1); + } + print("\n\n\n"); + print("FAIL path:" + path); + print("unexpected files:"); + printjson(files); + assert(false, "FAIL a journal/lsn file is present which is unexpected"); + } +} + +function runDiff(a, b) { + function reSlash(s) { + var x = s; + if (_isWindows()) { + while (1) { + var y = x.replace('/', '\\'); + if (y == x) + break; + x = y; + } + } + return x; + } + a = reSlash(a); + b = reSlash(b); + print("diff " + a + " " + b); + return run("diff", a, b); +} + +function log(str) { + if (str) + print("\n" + testname + " step " + step++ + " " + str); + else + print("\n" + testname + " step " + step++); +} + +// if you do inserts here, you will want to set _id. otherwise they won't match on different +// runs so we can't do a binary diff of the resulting files to check they are consistent. +function work() { + log("work (add data, drop database)"); + + var e = conn.getDB("teste"); + e.foo.insert({ _id: 99 }); + + var d = conn.getDB("test"); + d.foo.insert({ _id: 3, x: 22 }); + d.bar.insert({ _id: 3, x: 22 }); + + d.dropDatabase(); + + d.foo.insert({ _id: 100 }); + + // assure writes applied in case we kill -9 on return from this function + assert(d.runCommand({ getlasterror: 1, fsync: 1 }).ok, "getlasterror not ok"); +} + +function verify() { + log("verify"); + var d = conn.getDB("test"); + var count = d.foo.count(); + if (count != 1) { + print("going to fail, count mismatch in verify()"); + sleep(10000); // easier to read the output this way + print("\n\n\ndropdb.js FAIL test.foo.count() should be 1 but is : " + count); + print(d.foo.count() + "\n\n\n"); + assert(false); + } + assert(d.foo.findOne()._id == 100, "100"); + + print("dropdb.js teste.foo.findOne:"); + printjson(conn.getDB("teste").foo.findOne()); + + var teste = conn.getDB("teste"); + print("dropdb count " + teste.foo.count()); + assert(teste.foo.findOne()._id == 99, "teste"); + +} + +if (debugging) { + // mongod already running in debugger + conn = db.getMongo(); + work(); + verify(); + sleep(30000); + quit(); +} + +// directories +var path1 = "/data/db/" + testname + "nodur"; +var path2 = "/data/db/" + testname + "dur"; + +// non-durable version +log("mongod nodur"); +conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles"); +work(); +verify(); +stopMongod(30000); + +// durable version +log("mongod dur"); +conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +work(); +verify(); + +// kill the process hard +log("kill 9"); +stopMongod(30001, /*signal*/9); + +// journal file should be present, and non-empty as we killed hard
+
+// we will force removal of a datafile to be sure we can recreate everything.
+removeFile(path2 + "/test.0");
+// the trick above is only valid if journals haven't rotated out, and also if lsn isn't skipping
+removeFile(path2 + "/lsn");
+ +log("restart and recover"); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 9); + +log("verify after recovery"); +verify(); + +log("stop mongod 30002"); +stopMongod(30002); +sleep(5000); + +// at this point, after clean shutdown, there should be no journal files +log("check no journal files"); +checkNoJournalFiles(path2 + "/journal"); + +log("check data matches ns"); +var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns"); +if (diff != "") { + print("\n\n\nDIFFERS\n"); + print(diff); +} +assert(diff == "", "error test.ns files differ"); + +log("check data matches .0"); +diff = runDiff(path1 + "/test.0", path2 + "/test.0"); +if (diff != "") { + print("\n\n\nDIFFERS\n"); + print(diff); +} +assert(diff == "", "error test.0 files differ"); + +log("check data matches done"); + +print(testname + " SUCCESS"); + diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js new file mode 100755 index 0000000..4c8f1bf --- /dev/null +++ b/jstests/dur/dur1.js @@ -0,0 +1,154 @@ +/* + test durability +*/ + +var debugging = false; +var testname = "dur1"; +var step = 1; +var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+ +function runDiff(a, b) { + function reSlash(s) { + var x = s; + if (_isWindows()) { + while (1) { + var y = x.replace('/', '\\'); + if (y == x) + break; + x = y; + } + } + return x; + } + a = reSlash(a); + b = reSlash(b); + print("diff " + a + " " + b); + return run("diff", a, b); +} + +function log(str) { + print(); + if(str) + print(testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +// if you do inserts here, you will want to set _id. otherwise they won't match on different +// runs so we can't do a binary diff of the resulting files to check they are consistent. +function work() { + log("work"); + var d = conn.getDB("test"); + d.foo.insert({ _id: 3, x: 22 }); + d.foo.insert({ _id: 4, x: 22 }); + d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] }); + d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] }); + d.a.update({ _id: 4 }, { $inc: { x: 1} }); + + // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: + d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + +// d.a.update({ _id: 4 }, { $inc: { x: 1} }); +// d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError();
+
+ log("endwork");
+    return d;
+}
+
+function verify() {
+    log("verify");
+ var d = conn.getDB("test");
+ var ct = d.foo.count();
+ if (ct != 2) {
+ print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
+ assert(ct == 2);
+ } +} + +if( debugging ) { + // mongod already running in debugger + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +log(); + +// directories +var path1 = "/data/db/" + testname+"nodur"; +var path2 = "/data/db/" + testname+"dur"; + +// non-durable version +log(); +conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles"); +work(); +stopMongod(30000); + +// durable version +log(); +conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +work(); + +// wait for group commit. +printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + +// kill the process hard +stopMongod(30001, /*signal*/9); + +// journal file should be present, and non-empty as we killed hard + +// restart and recover +log(); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +verify(); + +log("stop"); +stopMongod(30002); + +// stopMongod seems to be asynchronous (hmmm) so we sleep here. +sleep(5000); + +// at this point, after clean shutdown, there should be no journal files +log("check no journal files"); +checkNoJournalFiles(path2 + "/journal"); + +log("check data matches ns"); +var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns"); +if (diff != "") { + print("\n\n\nDIFFERS\n"); + print(diff); +} +assert(diff == "", "error test.ns files differ"); + +log("check data matches .0"); +var diff = runDiff(path1 + "/test.0", path2 + "/test.0"); +if (diff != "") { + print("\n\n\nDIFFERS\n"); + print(diff); +} +assert(diff == "", "error test.0 files differ"); + +log("check data matches done"); + +print(testname + " SUCCESS"); + diff --git a/jstests/dur/dur2.js b/jstests/dur/dur2.js new file mode 100644 index 0000000..dd0ab0f --- /dev/null +++ b/jstests/dur/dur2.js @@ -0,0 +1,92 @@ +/* test durability + runs mongod, kill -9's, recovers +*/ + +var debugging = false; +var testname = "dur2"; +var step = 1; +var conn = null; + +var start = new Date(); +function howLongSecs() { + return (new Date() - start) / 1000; +} + +function log(str) { + if(str) + print("\n" + testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +function verify() { + log("verify"); + var d = conn.getDB("test"); + var mycount = d.foo.count(); + //print("count:" + mycount); + assert(mycount>2, "count wrong"); +} + +function work() { + log("work"); + x = 'x'; while(x.length < 1024) x+=x; + var d = conn.getDB("test"); + d.foo.drop(); + d.foo.insert({}); + + // go long enough we will have time to kill it later during recovery + var j = 2; + var MaxTime = 15; + if (Math.random() < 0.1) { + print("dur2.js DOING A LONGER (120 sec) PASS - if an error, try long pass to replicate"); + MaxTime = 120; + } + while (1) { + d.foo.insert({ _id: j, z: x }); + d.foo.update({ _id: j }, { $inc: { a: 1} }); + if (j % 25 == 0) + d.foo.remove({ _id: j }); + j++; + if( j % 3 == 0 ) + d.foo.update({ _id: j }, { $inc: { a: 1} }, true); + if (j % 10000 == 0) + print(j); + if (howLongSecs() > MaxTime) + break; + } + + verify(); + d.runCommand({ getLastError: 1, fsync: 1 }); +} + +if( debugging ) { + // mongod already running in debugger + print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR"); + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +// directories +var path = "/data/db/" + testname+"dur"; + +log("run mongod with --dur"); +conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 
/*DurParanoid*/8, "--master", "--oplogSize", 64);
+work();
+
+log("kill -9"); +stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+assert(listFiles(path + "/journal/").length > 0, "journal directory is unexpectantly empty after kill"); + +// restart and recover +log("restart mongod and recover"); +conn = startMongodNoReset("--port", 30002, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8, "--master", "--oplogSize", 64); +verify(); + +log("stopping mongod 30002"); +stopMongod(30002); + +print(testname + " SUCCESS"); diff --git a/jstests/dur/lsn.js b/jstests/dur/lsn.js new file mode 100755 index 0000000..505d8f5 --- /dev/null +++ b/jstests/dur/lsn.js @@ -0,0 +1,126 @@ +/* test durability, specifically last sequence number function + runs mongod, kill -9's, recovers + then writes more data and verifies with DurParanoid that it matches +*/ + +var debugging = false; +var testname = "lsn"; +var step = 1; +var conn = null; + +var start = new Date(); +function howLongSecs() { + return (new Date() - start) / 1000; +} + +function log(str) { + if(str) + print("\n" + testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +function verify() { + log("verify"); + var d = conn.getDB("test"); + var mycount = d.foo.count(); + print("count:" + mycount); + assert(mycount>2, "count wrong"); +} + +// if you do inserts here, you will want to set _id. otherwise they won't match on different +// runs so we can't do a binary diff of the resulting files to check they are consistent. +function work() { + log("work"); + x = 'x'; while(x.length < 1024) x+=x; + var d = conn.getDB("test"); + d.foo.drop(); + d.foo.insert({}); + + // go long enough we will have time to kill it later during recovery + var j = 2; + var MaxTime = 15; + if (Math.random() < 0.05) { + print("doing a longer pass"); + MaxTime = 90; + } + while (1) { + d.foo.insert({ _id: j, z: x }); + d.foo.update({ _id: j }, { $inc: { a: 1} }); + if (j % 25 == 0) + d.foo.remove({ _id: j }); + j++; + if( j % 3 == 0 ) + d.foo.update({ _id: j }, { $inc: { a: 1} }, true); + if (j % 10000 == 0) + print(j); + if (howLongSecs() > MaxTime) + break; + } + + verify(); + d.runCommand({ getLastError: 1, fsync: 1 }); +} + +if( debugging ) { + // mongod already running in debugger + print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR"); + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +// directories +var path2 = "/data/db/" + testname+"dur"; + +// run mongod with a short --syncdelay to make LSN writing sooner
+log("run mongod --dur and a short --syncdelay"); +conn = startMongodEmpty("--syncdelay", 2, "--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", /*DurParanoid*/8, "--master", "--oplogSize", 64); +work();
+
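+// (--syncdelay 2 makes mongod flush data files roughly every 2 seconds; per the comment above, the lsn
+//  file is written around those flushes, so the wait below should be enough to see one written -- this is
+//  an assumption about the lsn/syncdelay relationship, not something the test asserts directly)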
+log("wait a while for a sync and an lsn write");
+sleep(14000); // wait ~14 seconds for an lsn write (sleep() takes milliseconds)
+
+log("kill mongod -9");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// check that there is an lsn file
+{
+ var files = listFiles(path2 + "/journal/");
+ assert(files.some(function (f) { return f.name.indexOf("lsn") >= 0; }),
+ "lsn.js FAIL no lsn file found after kill, yet one is expected");
+}
+/*assert.soon(
+ function () {
+ var files = listFiles(path2 + "/journal/");
+ return files.some(function (f) { return f.name.indexOf("lsn") >= 0; });
+ },
+ "lsn.js FAIL no lsn file found after kill, yet one is expected"
+);*/ + +// restart and recover +log("restart mongod, recover, verify"); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 24, "--master", "--oplogSize", 64); +verify(); + +// idea here is to verify (in a simplistic way) that we are in a good state to do further ops after recovery +log("add data after recovery"); +{ + var d = conn.getDB("test"); + d.xyz.insert({ x: 1 }); + d.xyz.insert({ x: 1 }); + d.xyz.insert({ x: 1 }); + d.xyz.update({}, { $set: { x: "aaaaaaaaaaaa"} }); + d.xyz.reIndex(); + d.xyz.drop(); + sleep(1); + d.xyz.insert({ x: 1 }); +} + +log("stop mongod 30002"); +stopMongod(30002); + +print(testname + " SUCCESS"); diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js new file mode 100755 index 0000000..04e4318 --- /dev/null +++ b/jstests/dur/manyRestart.js @@ -0,0 +1,191 @@ +/* + test durability +*/ + +var debugging = false; +var testname = "manyRestarts"; +var step = 1; +var conn = null; + +function checkNoJournalFiles(path, pass) { + var files = listFiles(path); + if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) { + if (pass == null) { + // wait a bit longer for mongod to potentially finish if it is still running. + sleep(10000); + return checkNoJournalFiles(path, 1); + } + print("\n\n\n"); + print("FAIL path:" + path); + print("unexpected files:"); + printjson(files); + assert(false, "FAIL a journal/lsn file is present which is unexpected"); + } +} + +function runDiff(a, b) { + function reSlash(s) { + var x = s; + if (_isWindows()) { + while (1) { + var y = x.replace('/', '\\'); + if (y == x) + break; + x = y; + } + } + return x; + } + a = reSlash(a); + b = reSlash(b); + print("diff " + a + " " + b); + return run("diff", a, b); +} + +function log(str) { + print(); + if(str) + print(testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +// if you do inserts here, you will want to set _id. otherwise they won't match on different +// runs so we can't do a binary diff of the resulting files to check they are consistent. +function work() { + log("work"); + var d = conn.getDB("test"); + d.foo.insert({ _id: 3, x: 22 }); + d.foo.insert({ _id: 4, x: 22 }); + d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] }); + d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] }); + d.a.update({ _id: 4 }, { $inc: { x: 1} }); + + // try building an index. 
however, be careful as object id's in system.indexes would vary, so we do it manually: + d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + +// d.a.update({ _id: 4 }, { $inc: { x: 1} }); +// d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); + log("endwork"); + return d; +} + +function addRows() { + var rand = Random.randInt(10000); + log("add rows " + rand); + var d = conn.getDB("test"); + for (var j = 0; j < rand; ++j) { + d.rows.insert({a:1, b: "blah"}); + } + return rand; +} + +function verify() { + log("verify"); + var d = conn.getDB("test"); + assert.eq(d.foo.count(), 2, "collection count is wrong"); + assert.eq(d.a.count(), 2, "collection count is wrong"); +} + +function verifyRows(nrows) { + log("verify rows " + nrows); + var d = conn.getDB("test"); + assert.eq(d.rows.count(), nrows, "collection count is wrong"); +} + +if( debugging ) { + // mongod already running in debugger + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +log(); + +// directories +var path1 = "/data/db/" + testname+"nodur"; +var path2 = "/data/db/" + testname+"dur"; + +// non-durable version +log("starting 30000"); +conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles"); +work(); +stopMongod(30000); + +log("starting 30001"); +conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +work(); +// wait for group commit. +printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + +stopMongod(30001); +sleep(5000); + +for (var i = 0; i < 3; ++i) { + + // durable version + log("restarting 30001"); + conn = startMongodNoReset("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); + + // wait for group commit. + printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + + verify(); + + // kill the process hard + log("hard kill"); + stopMongod(30001, /*signal*/9); + + sleep(5000); +} + +// journal file should be present, and non-empty as we killed hard + +// restart and recover +log("restart"); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +log("verify"); +verify(); +log("stop"); +stopMongod(30002); +sleep(5000); + +// at this point, after clean shutdown, there should be no journal files +log("check no journal files"); +checkNoJournalFiles(path2 + "/journal"); + +log("check data matches ns"); +var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns"); +assert(diff == "", "error test.ns files differ"); + +log("check data matches .0"); +var diff = runDiff(path1 + "/test.0", path2 + "/test.0"); +assert(diff == "", "error test.0 files differ"); + +log("check data matches done"); + +var nrows = 0; +for (var i = 0; i < 5; ++i) { + + // durable version + log("restarting 30001"); + conn = startMongodNoReset("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); + nrows += addRows(); + // wait for group commit. + printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + + verifyRows(nrows); + + // kill the process hard + log("hard kill"); + stopMongod(30001, /*signal*/9); + + sleep(5000); +} + +print(testname + " SUCCESS"); + diff --git a/jstests/dur/md5.js b/jstests/dur/md5.js new file mode 100644 index 0000000..107476e --- /dev/null +++ b/jstests/dur/md5.js @@ -0,0 +1,101 @@ +/** + * Test md5 validation of journal file. 
+ * This test is dependent on the journal file format and may require an update if the format changes, + * see comments near fuzzFile() below. + */ + +var debugging = false; +var testname = "dur_md5"; +var step = 1; +var conn = null; + +function log(str) { + print(); + if(str) + print(testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +/** Changes here may require updating the byte index of the md5 hash, see File comments below. */ +function work() { + log("work"); + var d = conn.getDB("test"); + d.foo.insert({ _id: 3, x: 22 }); + d.foo.insert({ _id: 4, x: 22 }); + d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] }); + d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] }); + d.a.update({ _id: 4 }, { $inc: { x: 1} }); + + // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: + d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + + // d.a.update({ _id: 4 }, { $inc: { x: 1} }); + // d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); + + log("endwork"); +} + +if( debugging ) { + // mongod already running in debugger + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +log(); + +var path = "/data/db/" + testname+"dur"; + +log(); +conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8); +work(); + +// wait for group commit. +printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+log("kill -9"); + +// kill the process hard +stopMongod(30001, /*signal*/9); + +// journal file should be present, and non-empty as we killed hard + +// Bit flip the first byte of the md5sum contained within the opcode footer.
+// This ensures we get an md5 exception instead of some other type of exception.
+var file = path + "/journal/j._0";
+
+// if test fails, uncomment these "cp" lines to debug:
+// run("cp", file, "/tmp/before");
+
+// journal header is 8192
+// jsectheader is 20
+// so a little beyond that
+fuzzFile(file, 8214+8);
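+// (for reference: 8192 + 20 = 8212, and 8214 + 8 = 8222, so the flipped byte sits a few bytes past the
+//  first section header -- inferred from the two size comments above; the exact field hit depends on the
+//  journal format, which is why the header comment warns this test may need updating if the format changes)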
+
+// run("cp", file, "/tmp/after"); + +log("run mongod again recovery should fail"); + +// 100 exit code corresponds to EXIT_UNCAUGHT, which is triggered when there is an exception during recovery. +// 14 is is sometimes triggered instead due to SERVER-2184 +exitCode = runMongoProgram( "mongod", "--port", 30002, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", /*9*/13 );
+
+if (exitCode != 100 && exitCode != 14) {
+ print("\n\n\nFAIL md5.js expected mongod to fail but didn't? mongod exitCode: " + exitCode + "\n\n\n");
+ // sleep a little longer to get more output maybe
+ sleep(2000);
+ assert(false);
+}
+
+// TODO Possibly we could check the mongod log to verify that the correct type of exception was thrown. But
+// that would introduce a dependency on the mongod log format, which we may not want.
+
+print("SUCCESS md5.js");
+
+// if we sleep a little here we may get more of the mongod output logged
+sleep(500);
diff --git a/jstests/dur/oplog.js b/jstests/dur/oplog.js
new file mode 100755
index 0000000..379c1b6
--- /dev/null
+++ b/jstests/dur/oplog.js
@@ -0,0 +1,159 @@
+/* oplog.js */
+
+var debugging = false;
+var testname = "oplog";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+ +function runDiff(a, b) { + function reSlash(s) { + var x = s; + if (_isWindows()) { + while (1) { + var y = x.replace('/', '\\'); + if (y == x) + break; + x = y; + } + } + return x; + } + a = reSlash(a); + b = reSlash(b); + print("diff " + a + " " + b); + return runProgram("diff", a, b); +} + +function log(str) { + print(); + if(str) + print(testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +function verify() { + log("verify"); + var d = conn.getDB("local"); + var mycount = d.oplog.$main.find({ "o.z": 3 }).count(); + print(mycount); + assert(mycount == 3, "oplog doesnt match"); +} + +// if you do inserts here, you will want to set _id. otherwise they won't match on different +// runs so we can't do a binary diff of the resulting files to check they are consistent. +function work() { + log("work"); + var d = conn.getDB("test"); + var q = conn.getDB("testq"); // use tewo db's to exercise JDbContext a bit. + d.foo.insert({ _id: 3, x: 22 }); + d.foo.insert({ _id: 4, x: 22 }); + q.foo.insert({ _id: 4, x: 22 }); + d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] }); + q.a.insert({ _id: 3, x: 22, y: [1, 2, 3] }); + d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] }); + d.a.update({ _id: 4 }, { $inc: { x: 1} }); + // OpCode_ObjCopy fires on larger operations so make one that isn't tiny + var big = "axxxxxxxxxxxxxxb"; + big = big + big; + big = big + big; + big = big + big; + big = big + big; + big = big + big; + d.foo.insert({ _id: 5, q: "aaaaa", b: big, z: 3 }); + q.foo.insert({ _id: 5, q: "aaaaa", b: big, z: 3 }); + d.foo.insert({ _id: 6, q: "aaaaa", b: big, z: 3 }); + d.foo.update({ _id: 5 }, { $set: { z: 99} }); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); + + log("endwork"); + + verify(); +} + +if( debugging ) { + // mongod already running in debugger + print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR"); + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +log(); + +// directories +var path1 = "/data/db/" + testname+"nodur"; +var path2 = "/data/db/" + testname+"dur"; + +// non-durable version +log(); +conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles", "--master", "--oplogSize", 64); +work(); +stopMongod(30000); + +// durable version +log(); +conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", /*DurParanoid*/8, "--master", "--oplogSize", 64); +work(); + +// wait for group commit. +printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + +// kill the process hard +stopMongod(30001, /*signal*/9); + +// journal file should be present, and non-empty as we killed hard + +// restart and recover +log(); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8, "--master", "--oplogSize", 64); +verify(); + +log("stop"); +stopMongod(30002); + +// stopMongod seems to be asynchronous (hmmm) so we sleep here. +sleep(5000); + +// at this point, after clean shutdown, there should be no journal files +log("check no journal files");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+if (diff != "") {
+    print("\n\n\nDIFFERS\n");
+    print(diff);
+}
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+if (diff != "") {
+    print("\n\n\nDIFFERS\n");
+    print(diff);
+}
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+print(testname + " SUCCESS");