Diffstat (limited to 'tools')
-rw-r--r--  tools/bridge.cpp   |   9
-rw-r--r--  tools/dump.cpp     | 134
-rw-r--r--  tools/export.cpp   |  74
-rw-r--r--  tools/import.cpp   | 327
-rw-r--r--  tools/restore.cpp  |  43
-rw-r--r--  tools/sniffer.cpp  |  30
-rw-r--r--  tools/stat.cpp     |  72
-rw-r--r--  tools/tool.cpp     |  82
-rw-r--r--  tools/tool.h       |   4
-rw-r--r--  tools/top.cpp      | 196
10 files changed, 756 insertions(+), 215 deletions(-)
diff --git a/tools/bridge.cpp b/tools/bridge.cpp
index 86dea0a..341a1da 100644
--- a/tools/bridge.cpp
+++ b/tools/bridge.cpp
@@ -17,7 +17,8 @@
*/
#include "pch.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
#include "../client/dbclient.h"
#include "../db/dbmessage.h"
@@ -41,7 +42,7 @@ public:
try {
m.reset();
if ( !mp_.recv( m ) ) {
- cout << "end connection " << mp_.farEnd.toString() << endl;
+ cout << "end connection " << mp_.remoteString() << endl;
mp_.shutdown();
break;
}
@@ -87,7 +88,7 @@ set<MessagingPort*> ports;
class MyListener : public Listener {
public:
- MyListener( int port ) : Listener( "", port ) {}
+ MyListener( int port ) : Listener( "bridge" , "", port ) {}
virtual void accepted(MessagingPort *mp) {
ports.insert( mp );
Forwarder f( *mp );
@@ -108,7 +109,7 @@ void cleanup( int sig ) {
void myterminate() {
rawOut( "bridge terminate() called, printing stack:" );
printStackTrace();
- abort();
+ ::abort();
}
void setupSignals() {
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 29553f4..a1690b2 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -18,6 +18,7 @@
#include "../pch.h"
#include "../client/dbclient.h"
+#include "../db/db.h"
#include "tool.h"
#include <fcntl.h>
@@ -27,6 +28,14 @@ using namespace mongo;
namespace po = boost::program_options;
class Dump : public Tool {
+ class FilePtr : boost::noncopyable {
+ public:
+ /*implicit*/ FilePtr(FILE* f) : _f(f) {}
+ ~FilePtr() { fclose(_f); }
+ operator FILE*() { return _f; }
+ private:
+ FILE* _f;
+ };
public:
Dump() : Tool( "dump" , ALL , "*" , "*" , false ) {
add_options()
@@ -34,15 +43,24 @@ public:
("query,q", po::value<string>() , "json query" )
("oplog", "Use oplog for point-in-time snapshotting" )
("repair", "try to recover a crashed database" )
+ ("forceTableScan", "force a table scan (do not use $snapshot)" )
;
}
// This is a functor that writes a BSONObj to a file
struct Writer {
- Writer(ostream& out, ProgressMeter* m) :_out(out), _m(m) {}
+ Writer(FILE* out, ProgressMeter* m) :_out(out), _m(m) {}
void operator () (const BSONObj& obj) {
- _out.write( obj.objdata() , obj.objsize() );
+ size_t toWrite = obj.objsize();
+ size_t written = 0;
+
+ while (toWrite) {
+ size_t ret = fwrite( obj.objdata()+written, 1, toWrite, _out );
+ uassert(14035, errnoWithPrefix("couldn't write to file"), ret);
+ toWrite -= ret;
+ written += ret;
+ }
// if there's a progress bar, hit it
if (_m) {
@@ -50,21 +68,19 @@ public:
}
}
- ostream& _out;
+ FILE* _out;
ProgressMeter* _m;
};
- void doCollection( const string coll , ostream &out , ProgressMeter *m ) {
- Query q;
- if ( _query.isEmpty() && !hasParam("dbpath"))
- q.snapshot();
- else
- q = _query;
+ void doCollection( const string coll , FILE* out , ProgressMeter *m ) {
+ Query q = _query;
int queryOptions = QueryOption_SlaveOk | QueryOption_NoCursorTimeout;
if (startsWith(coll.c_str(), "local.oplog."))
queryOptions |= QueryOption_OplogReplay;
-
+ else if ( _query.isEmpty() && !hasParam("dbpath") && !hasParam("forceTableScan") )
+ q.snapshot();
+
DBClientBase& connBase = conn(true);
Writer writer(out, m);
@@ -86,21 +102,18 @@ public:
void writeCollectionFile( const string coll , path outputFile ) {
cout << "\t" << coll << " to " << outputFile.string() << endl;
- ofstream out;
- out.open( outputFile.string().c_str() , ios_base::out | ios_base::binary );
- assertStreamGood( 10262 , "couldn't open file" , out );
+ FilePtr f (fopen(outputFile.string().c_str(), "wb"));
+ uassert(10262, errnoWithPrefix("couldn't open file"), f);
ProgressMeter m( conn( true ).count( coll.c_str() , BSONObj() , QueryOption_SlaveOk ) );
- doCollection(coll, out, &m);
+ doCollection(coll, f, &m);
cout << "\t\t " << m.done() << " objects" << endl;
-
- out.close();
}
void writeCollectionStdout( const string coll ) {
- doCollection(coll, cout, NULL);
+ doCollection(coll, stdout, NULL);
}
void go( const string db , const path outdir ) {
@@ -113,10 +126,14 @@ public:
auto_ptr<DBClientCursor> cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->nextSafe();
- if ( obj.toString().find( ".$" ) != string::npos )
+ const string name = obj.getField( "name" ).valuestr();
+
+ // skip namespaces with $ in them only if we don't specify a collection to dump
+ if ( _coll == "*" && name.find( ".$" ) != string::npos ) {
+ log(1) << "\tskipping collection: " << name << endl;
continue;
+ }
- const string name = obj.getField( "name" ).valuestr();
const string filename = name.substr( db.size() + 1 );
if ( _coll != "*" && db + "." + _coll != name && _coll != name )
@@ -139,18 +156,13 @@ public:
return -1;
}
- if ( hasParam( "collection" ) ){
- cout << "repair mode can't work with collection, only on full db" << endl;
- return -1;
- }
-
string dbname = getParam( "db" );
log() << "going to try and recover data from: " << dbname << endl;
return _repair( dbname );
}
- DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc ){
+ DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ){
LogIndentLevel lil;
if ( eLoc.getOfs() <= 0 ){
@@ -170,22 +182,49 @@ public:
LogIndentLevel lil2;
+ set<DiskLoc> seen;
+
DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
while ( ! loc.isNull() ){
+
+ if ( ! seen.insert( loc ).second ) {
+                    error() << "infinite loop in extent, seen: " << loc << " before" << endl;
+ break;
+ }
+
if ( loc.getOfs() <= 0 ){
error() << "offset is 0 for record which should be impossible" << endl;
break;
}
- log() << loc << endl;
+ log(1) << loc << endl;
Record* rec = loc.rec();
- log() << loc.obj() << endl;
+ BSONObj obj;
+ try {
+ obj = loc.obj();
+ assert( obj.valid() );
+ LOG(1) << obj << endl;
+ w( obj );
+ }
+ catch ( std::exception& e ) {
+ log() << "found invalid document @ " << loc << " " << e.what() << endl;
+ if ( ! obj.isEmpty() ) {
+ try {
+ BSONElement e = obj.firstElement();
+ stringstream ss;
+ ss << "first element: " << e;
+ log() << ss.str();
+ }
+ catch ( std::exception& ) {
+ }
+ }
+ }
loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
}
return forward ? e->xnext : e->xprev;
}
- void _repair( Database* db , string ns ){
+ void _repair( Database* db , string ns , path outfile ){
NamespaceDetails * nsd = nsdetails( ns.c_str() );
log() << "nrecords: " << nsd->stats.nrecords
<< " datasize: " << nsd->stats.datasize
@@ -201,36 +240,46 @@ public:
log() << " ERROR fisrtExtent is not valid" << endl;
return;
}
+
+ outfile /= ( ns.substr( ns.find( "." ) + 1 ) + ".bson" );
+ log() << "writing to: " << outfile.string() << endl;
+ FilePtr f (fopen(outfile.string().c_str(), "wb"));
+
+ ProgressMeter m( nsd->stats.nrecords * 2 );
+
+ Writer w( f , &m );
+
try {
log() << "forward extent pass" << endl;
LogIndentLevel lil;
DiskLoc eLoc = nsd->firstExtent;
while ( ! eLoc.isNull() ){
log() << "extent loc: " << eLoc << endl;
- eLoc = _repairExtent( db , ns , true , eLoc );
+ eLoc = _repairExtent( db , ns , true , eLoc , w );
}
}
catch ( DBException& e ){
error() << "forward extent pass failed:" << e.toString() << endl;
}
-
+
try {
log() << "backwards extent pass" << endl;
LogIndentLevel lil;
DiskLoc eLoc = nsd->lastExtent;
while ( ! eLoc.isNull() ){
log() << "extent loc: " << eLoc << endl;
- eLoc = _repairExtent( db , ns , false , eLoc );
+ eLoc = _repairExtent( db , ns , false , eLoc , w );
}
}
catch ( DBException& e ){
error() << "ERROR: backwards extent pass failed:" << e.toString() << endl;
}
+ log() << "\t\t " << m.done() << " objects" << endl;
}
- int _repair( string dbname ){
+ int _repair( string dbname ) {
dblock lk;
Client::Context cx( dbname );
Database * db = cx.db();
@@ -238,16 +287,28 @@ public:
list<string> namespaces;
db->namespaceIndex.getNamespaces( namespaces );
+ path root = getParam( "out" );
+ root /= dbname;
+ create_directories( root );
+
for ( list<string>::iterator i=namespaces.begin(); i!=namespaces.end(); ++i ){
LogIndentLevel lil;
string ns = *i;
+
if ( str::endsWith( ns , ".system.namespaces" ) )
continue;
+
+ if ( str::contains( ns , ".tmp.mr." ) )
+ continue;
+
+ if ( _coll != "*" && ! str::endsWith( ns , _coll ) )
+ continue;
+
log() << "trying to recover: " << ns << endl;
LogIndentLevel lil2;
try {
- _repair( db , ns );
+ _repair( db , ns , root );
}
catch ( DBException& e ){
log() << "ERROR recovering: " << ns << " " << e.toString() << endl;
@@ -318,12 +379,7 @@ public:
}
}
- {
- // TODO: when mongos supports QueryOption_Exaust add a version check (SERVER-2628)
- BSONObj isdbgrid;
- conn("true").simpleCommand("admin", &isdbgrid, "isdbgrid");
- _usingMongos = isdbgrid["isdbgrid"].trueValue();
- }
+ _usingMongos = isMongos();
path root( out );
string db = _db;
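
The rewritten Writer above switches from ofstream to a raw FILE* and loops until fwrite has consumed the whole object, since fwrite may return a short count. A minimal standalone sketch of that pattern, assuming an illustrative writeAll() helper and output file name rather than the real mongodump code:

#include <cstddef>
#include <cstdio>
#include <stdexcept>

// Loop until the whole buffer is written; fwrite may legitimately return a
// short count, and 0 signals an error (e.g. a full disk).
static void writeAll(FILE* out, const char* data, std::size_t len) {
    std::size_t written = 0;
    while (written < len) {
        std::size_t ret = fwrite(data + written, 1, len - written, out);
        if (ret == 0)
            throw std::runtime_error("couldn't write to file");
        written += ret;
    }
}

int main() {
    FILE* f = fopen("example.bson", "wb");       // stand-in for Dump's FilePtr/fopen
    if (!f) return 1;
    const char doc[] = "\x05\x00\x00\x00\x00";   // smallest valid BSON document (empty)
    writeAll(f, doc, sizeof(doc) - 1);
    fclose(f);
    return 0;
}
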
diff --git a/tools/export.cpp b/tools/export.cpp
index 0262c4b..c3a5420 100644
--- a/tools/export.cpp
+++ b/tools/export.cpp
@@ -40,10 +40,78 @@ public:
("csv","export to csv instead of json")
("out,o", po::value<string>(), "output file; if not specified, stdout is used")
("jsonArray", "output to a json array rather than one object per line")
+ ("slaveOk,k", po::value<bool>()->default_value(true) , "use secondaries for export if available, default true")
;
_usesstdout = false;
}
+    // Turns every double quote character into two double quote characters.
+    // If hasSurroundingQuotes is true, the first and last characters of the
+    // string are left unescaped; if it's false, the whole string is wrapped
+    // in double quotes.
+ string csvEscape(string str, bool hasSurroundingQuotes = false) {
+ size_t index = hasSurroundingQuotes ? 1 : 0;
+ while (((index = str.find('"', index)) != string::npos)
+ && (index < (hasSurroundingQuotes ? str.size() - 1 : str.size()))) {
+ str.replace(index, 1, "\"\"");
+ index += 2;
+ }
+ return hasSurroundingQuotes ? str : "\"" + str + "\"";
+ }
+
+ // Gets the string representation of a BSON object that can be correctly written to a CSV file
+ string csvString (const BSONElement& object) {
+ const char* binData; // Only used with BinData type
+
+ switch (object.type()) {
+ case MinKey:
+ return "$MinKey";
+ case MaxKey:
+ return "$MaxKey";
+ case NumberInt:
+ case NumberDouble:
+ case NumberLong:
+ case Bool:
+ return object.toString(false);
+ case String:
+ case Symbol:
+ return csvEscape(object.toString(false), true);
+ case Object:
+ return csvEscape(object.jsonString(Strict, false));
+ case Array:
+ return csvEscape(object.jsonString(Strict, false));
+ case BinData:
+ int len;
+ binData = object.binDataClean(len);
+ return toHex(binData, len);
+ case jstOID:
+ return "ObjectID(" + object.OID().toString() + ")"; // OIDs are always 24 bytes
+ case Date:
+ return timeToISOString(object.Date() / 1000);
+ case Timestamp:
+ return csvEscape(object.jsonString(Strict, false));
+ case RegEx:
+ return csvEscape("/" + string(object.regex()) + "/" + string(object.regexFlags()));
+ case Code:
+ return csvEscape(object.toString(false));
+ case CodeWScope:
+ if (string(object.codeWScopeScopeData()) == "") {
+ return csvEscape(object.toString(false));
+ } else {
+ return csvEscape(object.jsonString(Strict, false));
+ }
+ case EOO:
+ case Undefined:
+ case DBRef:
+ case jstNULL:
+ cerr << "Invalid BSON object type for CSV output: " << object.type() << endl;
+ return "";
+ }
+ // Can never get here
+ assert(false);
+ return "";
+ }
+
int run() {
string ns;
const bool csv = hasParam( "csv" );
@@ -110,7 +178,9 @@ public:
if ( q.getFilter().isEmpty() && !hasParam("dbpath"))
q.snapshot();
- auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
+ bool slaveOk = _params["slaveOk"].as<bool>();
+
+ auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , ( slaveOk ? QueryOption_SlaveOk : 0 ) | QueryOption_NoCursorTimeout );
if ( csv ) {
for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
@@ -134,7 +204,7 @@ public:
out << ",";
const BSONElement & e = obj.getFieldDotted(i->c_str());
if ( ! e.eoo() ) {
- out << e.jsonString( Strict , false );
+ out << csvString(e);
}
}
out << endl;
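
csvEscape above implements the usual RFC 4180 rule: every embedded double quote is doubled, and the value is wrapped in quotes unless it already carries them. A self-contained sketch of the same rule (illustrative, not the mongoexport build):

#include <iostream>
#include <string>

// Double every embedded '"' and, unless the value already has surrounding
// quotes, wrap the whole string in quotes.
static std::string csvEscape(std::string str, bool hasSurroundingQuotes = false) {
    std::string::size_type index = hasSurroundingQuotes ? 1 : 0;
    while ((index = str.find('"', index)) != std::string::npos
           && index < (hasSurroundingQuotes ? str.size() - 1 : str.size())) {
        str.replace(index, 1, "\"\"");
        index += 2;
    }
    return hasSurroundingQuotes ? str : "\"" + str + "\"";
}

int main() {
    std::cout << csvEscape("say \"hi\", then leave") << std::endl;
    // prints: "say ""hi"", then leave"
    return 0;
}
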
diff --git a/tools/import.cpp b/tools/import.cpp
index 6b59bdc..16980b0 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -27,6 +27,7 @@
#include <iostream>
#include <boost/program_options.hpp>
+#include <boost/algorithm/string.hpp>
using namespace mongo;
@@ -44,100 +45,215 @@ class Import : public Tool {
bool _doimport;
bool _jsonArray;
vector<string> _upsertFields;
+ static const int BUF_SIZE = 1024 * 1024 * 4;
+
+ string trimWhitespace(const string& str) {
+ if (str.size() == 0) {
+ return str;
+ }
+ size_t begin = 0;
+ size_t end = str.size() - 1;
+ while (begin < str.size() && isspace(str[begin])) { ++begin; } // Finds index of first non-whitespace character
+ while (end > 0 && isspace(str[end])) { --end; } // Finds index of last non-whitespace character
+ return str.substr(begin, end - begin + 1);
+ }
+
+ void csvTokenizeRow(const string& row, vector<string>& tokens) {
+ bool inQuotes = false;
+ bool prevWasQuote = false;
+ bool tokenQuoted = false;
+ string curtoken = "";
+ for (string::const_iterator it = row.begin(); it != row.end(); ++it) {
+ char element = *it;
+ if (element == '"') {
+ if (!inQuotes) {
+ inQuotes = true;
+ tokenQuoted = true;
+ curtoken = "";
+ } else {
+ if (prevWasQuote) {
+ curtoken += "\"";
+ prevWasQuote = false;
+ } else {
+ prevWasQuote = true;
+ }
+ }
+ } else {
+ if (inQuotes && prevWasQuote) {
+ inQuotes = false;
+ prevWasQuote = false;
+ tokens.push_back(curtoken);
+ }
+
+ if (element == ',' && !inQuotes) {
+ if (!tokenQuoted) { // If token was quoted, it's already been added
+ tokens.push_back(trimWhitespace(curtoken));
+ }
+ curtoken = "";
+ tokenQuoted = false;
+ } else {
+ curtoken += element;
+ }
+ }
+ }
+ if (!tokenQuoted || (inQuotes && prevWasQuote)) {
+ tokens.push_back(trimWhitespace(curtoken));
+ }
+ }
void _append( BSONObjBuilder& b , const string& fieldName , const string& data ) {
- if ( b.appendAsNumber( fieldName , data ) )
+ if ( _ignoreBlanks && data.size() == 0 )
return;
- if ( _ignoreBlanks && data.size() == 0 )
+ if ( b.appendAsNumber( fieldName , data ) )
return;
// TODO: other types?
- b.append( fieldName , data );
+ b.append ( fieldName , data );
+ }
+
+ /*
+ * Reads one line from in into buf.
+ * Returns the number of bytes that should be skipped - the caller should
+ * increment buf by this amount.
+ */
+ int getLine(istream* in, char* buf) {
+ if (_jsonArray) {
+ in->read(buf, BUF_SIZE);
+ uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
+ buf[ in->gcount() ] = '\0';
+ }
+ else {
+ in->getline( buf , BUF_SIZE );
+ log(1) << "got line:" << buf << endl;
+ }
+ uassert( 10263 , "unknown error reading file" ,
+ (!(in->rdstate() & ios_base::badbit)) &&
+ (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
+
+ int numBytesSkipped = 0;
+ if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
+ buf += 3;
+ numBytesSkipped += 3;
+ }
+
+ uassert(13289, "Invalid UTF8 character detected", isValidUTF8(buf));
+ return numBytesSkipped;
}
- BSONObj parseLine( char * line ) {
- uassert(13289, "Invalid UTF8 character detected", isValidUTF8(line));
+ /*
+ * Parses a BSON object out of a JSON array.
+ * Returns number of bytes processed on success and -1 on failure.
+ */
+ int parseJSONArray(char* buf, BSONObj& o) {
+ int len = 0;
+ while (buf[0] != '{' && buf[0] != '\0') {
+ len++;
+ buf++;
+ }
+ if (buf[0] == '\0')
+ return -1;
+
+ int jslen;
+ o = fromjson(buf, &jslen);
+ len += jslen;
- if ( _type == JSON ) {
+ return len;
+ }
+
+ /*
+ * Parses one object from the input file. This usually corresponds to one line in the input
+ * file, unless the file is a CSV and contains a newline within a quoted string entry.
+     * Returns true if a BSONObj was successfully created and false if not.
+ */
+ bool parseRow(istream* in, BSONObj& o, int& numBytesRead) {
+ boost::scoped_array<char> buffer(new char[BUF_SIZE+2]);
+ char* line = buffer.get();
+
+ numBytesRead = getLine(in, line);
+ line += numBytesRead;
+
+ if (line[0] == '\0') {
+ return false;
+ }
+ numBytesRead += strlen( line );
+
+ if (_type == JSON) {
+ // Strip out trailing whitespace
char * end = ( line + strlen( line ) ) - 1;
- while ( isspace(*end) ) {
+ while ( end >= line && isspace(*end) ) {
*end = 0;
end--;
}
- return fromjson( line );
+ o = fromjson( line );
+ return true;
}
- BSONObjBuilder b;
+ vector<string> tokens;
+ if (_type == CSV) {
+ string row;
+ bool inside_quotes = false;
+ size_t last_quote = 0;
+ while (true) {
+ string lineStr(line);
+ // Deal with line breaks in quoted strings
+ last_quote = lineStr.find_first_of('"');
+ while (last_quote != string::npos) {
+ inside_quotes = !inside_quotes;
+ last_quote = lineStr.find_first_of('"', last_quote+1);
+ }
- unsigned int pos=0;
- while ( line[0] ) {
- string name;
- if ( pos < _fields.size() ) {
- name = _fields[pos];
+ row.append(lineStr);
+
+ if (inside_quotes) {
+ row.append("\n");
+ int num = getLine(in, line);
+ line += num;
+ numBytesRead += num;
+
+ uassert (15854, "CSV file ends while inside quoted field", line[0] != '\0');
+ numBytesRead += strlen( line );
+ } else {
+ break;
+ }
}
- else {
- stringstream ss;
- ss << "field" << pos;
- name = ss.str();
+            // now 'row' is a string corresponding to one row of the CSV file
+ // (which may span multiple lines) and represents one BSONObj
+ csvTokenizeRow(row, tokens);
+ }
+ else { // _type == TSV
+ while (line[0] != '\t' && isspace(line[0])) { // Strip leading whitespace, but not tabs
+ line++;
}
- pos++;
-
- bool done = false;
- string data;
- char * end;
- if ( _type == CSV && line[0] == '"' ) {
- line++; //skip first '"'
-
- while (true) {
- end = strchr( line , '"' );
- if (!end) {
- data += line;
- done = true;
- break;
- }
- else if (end[1] == '"') {
- // two '"'s get appended as one
- data.append(line, end-line+1); //include '"'
- line = end+2; //skip both '"'s
- }
- else if (end[-1] == '\\') {
- // "\\\"" gets appended as '"'
- data.append(line, end-line-1); //exclude '\\'
- data.append("\"");
- line = end+1; //skip the '"'
- }
- else {
- data.append(line, end-line);
- line = end+2; //skip '"' and ','
- break;
- }
- }
+
+ boost::split(tokens, line, boost::is_any_of(_sep));
+ }
+
+ // Now that the row is tokenized, create a BSONObj out of it.
+ BSONObjBuilder b;
+ unsigned int pos=0;
+ for (vector<string>::iterator it = tokens.begin(); it != tokens.end(); ++it) {
+ string token = *it;
+ if ( _headerLine ) {
+ _fields.push_back(token);
}
else {
- end = strstr( line , _sep );
- if ( ! end ) {
- done = true;
- data = string( line );
+ string name;
+ if ( pos < _fields.size() ) {
+ name = _fields[pos];
}
else {
- data = string( line , end - line );
- line = end+1;
+ stringstream ss;
+ ss << "field" << pos;
+ name = ss.str();
}
- }
+ pos++;
- if ( _headerLine ) {
- while ( isspace( data[0] ) )
- data = data.substr( 1 );
- _fields.push_back( data );
+ _append( b , name , token );
}
- else
- _append( b , name , data );
-
- if ( done )
- break;
}
- return b.obj();
+ o = b.obj();
+ return true;
}
public:
@@ -255,68 +371,37 @@ public:
_jsonArray = true;
}
- int errors = 0;
-
- int num = 0;
-
time_t start = time(0);
-
log(1) << "filesize: " << fileSize << endl;
ProgressMeter pm( fileSize );
- const int BUF_SIZE = 1024 * 1024 * 4;
- boost::scoped_array<char> line(new char[BUF_SIZE+2]);
- char * buf = line.get();
- while ( _jsonArray || in->rdstate() == 0 ) {
- if (_jsonArray) {
- if (buf == line.get()) { //first pass
- in->read(buf, BUF_SIZE);
- uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
- buf[ in->gcount() ] = '\0';
- }
- }
- else {
- buf = line.get();
- in->getline( buf , BUF_SIZE );
- log(1) << "got line:" << buf << endl;
- }
- uassert( 10263 , "unknown error reading file" ,
- (!(in->rdstate() & ios_base::badbit)) &&
- (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
-
- int len = 0;
- if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
- buf += 3;
- len += 3;
- }
-
- if (_jsonArray) {
- while (buf[0] != '{' && buf[0] != '\0') {
- len++;
- buf++;
- }
- if (buf[0] == '\0')
- break;
- }
- else {
- while (isspace( buf[0] )) {
- len++;
- buf++;
- }
- if (buf[0] == '\0')
- continue;
- len += strlen( buf );
- }
+ int num = 0;
+ int errors = 0;
+ int len = 0;
+ // buffer and line are only used when parsing a jsonArray
+ boost::scoped_array<char> buffer(new char[BUF_SIZE+2]);
+ char* line = buffer.get();
+ while ( _jsonArray || in->rdstate() == 0 ) {
try {
BSONObj o;
if (_jsonArray) {
- int jslen;
- o = fromjson(buf, &jslen);
- len += jslen;
- buf += jslen;
+ int bytesProcessed = 0;
+ if (line == buffer.get()) { // Only read on first pass - the whole array must be on one line.
+ bytesProcessed = getLine(in, line);
+ line += bytesProcessed;
+ len += bytesProcessed;
+ }
+ if ((bytesProcessed = parseJSONArray(line, o)) < 0) {
+ len += bytesProcessed;
+ break;
+ }
+ len += bytesProcessed;
+ line += len;
}
else {
- o = parseLine( buf );
+ if (!parseRow(in, o, len)) {
+ continue;
+ }
}
if ( _headerLine ) {
@@ -348,7 +433,7 @@ public:
}
catch ( std::exception& e ) {
cout << "exception:" << e.what() << endl;
- cout << buf << endl;
+ cout << line << endl;
errors++;
if (hasParam("stopOnError") || _jsonArray)
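
parseRow above detects a CSV row that spans physical lines by scanning each line's quote characters and tracking whether it ends inside a quoted field; a doubled quote flips the state twice and cancels out. A small standalone sketch of that parity scan (names are illustrative, not from the tool):

#include <iostream>
#include <string>

// Every '"' flips the in-quotes state; if the line ends with the state set,
// the logical CSV row continues on the next physical line.
static bool endsInsideQuotes(const std::string& line, bool startedInsideQuotes) {
    bool inside = startedInsideQuotes;
    std::string::size_type pos = line.find_first_of('"');
    while (pos != std::string::npos) {
        inside = !inside;
        pos = line.find_first_of('"', pos + 1);
    }
    return inside;
}

int main() {
    std::cout << endsInsideQuotes("1,\"a field that", false) << std::endl; // 1: read another line
    std::cout << endsInsideQuotes("keeps going\",2", true) << std::endl;   // 0: row is complete
    return 0;
}
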
diff --git a/tools/restore.cpp b/tools/restore.cpp
index 9a18c00..c08c14f 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -1,4 +1,4 @@
-// restore.cpp
+// @file restore.cpp
/**
* Copyright (C) 2008 10gen Inc.
@@ -25,6 +25,7 @@
#include <boost/program_options.hpp>
#include <fcntl.h>
+#include <set>
using namespace mongo;
@@ -38,13 +39,16 @@ class Restore : public BSONTool {
public:
bool _drop;
+ bool _keepIndexVersion;
string _curns;
string _curdb;
+ set<string> _users; // For restoring users with --drop
Restore() : BSONTool( "restore" ) , _drop(false) {
add_options()
("drop" , "drop each collection before import" )
("oplogReplay" , "replay oplog for point-in-time restore")
+ ("keepIndexVersion" , "don't upgrade indexes to newest version")
;
add_hidden_options()
("dir", po::value<string>()->default_value("dump"), "directory to restore from")
@@ -67,6 +71,7 @@ public:
}
_drop = hasParam( "drop" );
+ _keepIndexVersion = hasParam("keepIndexVersion");
bool doOplog = hasParam( "oplogReplay" );
if (doOplog) {
@@ -168,7 +173,7 @@ public:
if ( ! ( endsWith( root.string().c_str() , ".bson" ) ||
endsWith( root.string().c_str() , ".bin" ) ) ) {
- cerr << "don't know what to do with [" << root.string() << "]" << endl;
+ cerr << "don't know what to do with file [" << root.string() << "]" << endl;
return;
}
@@ -208,13 +213,31 @@ public:
out() << "\t going into namespace [" << ns << "]" << endl;
if ( _drop ) {
- out() << "\t dropping" << endl;
- conn().dropCollection( ns );
+ if (root.leaf() != "system.users.bson" ) {
+ out() << "\t dropping" << endl;
+ conn().dropCollection( ns );
+ } else {
+ // Create map of the users currently in the DB
+ BSONObj fields = BSON("user" << 1);
+ scoped_ptr<DBClientCursor> cursor(conn().query(ns, Query(), 0, 0, &fields));
+ while (cursor->more()) {
+ BSONObj user = cursor->next();
+ _users.insert(user["user"].String());
+ }
+ }
}
_curns = ns.c_str();
_curdb = NamespaceString(_curns).db;
processFile( root );
+ if (_drop && root.leaf() == "system.users.bson") {
+ // Delete any users that used to exist but weren't in the dump file
+ for (set<string>::iterator it = _users.begin(); it != _users.end(); ++it) {
+ BSONObj userMatch = BSON("user" << *it);
+ conn().remove(ns, Query(userMatch));
+ }
+ _users.clear();
+ }
}
virtual void gotObject( const BSONObj& obj ) {
@@ -245,7 +268,7 @@ public:
string s = _curdb + "." + n.coll;
bo.append("ns", s);
}
- else {
+ else if (strcmp(e.fieldName(), "v") != 0 || _keepIndexVersion) { // Remove index version number
bo.append(e);
}
}
@@ -257,10 +280,16 @@ public:
cerr << "Error creating index " << o["ns"].String();
cerr << ": " << err["code"].Int() << " " << err["err"].String() << endl;
cerr << "To resume index restoration, run " << _name << " on file" << _fileName << " manually." << endl;
- abort();
+ ::abort();
}
}
- else {
+ else if (_drop && endsWith(_curns.c_str(), ".system.users") && _users.count(obj["user"].String())) {
+ // Since system collections can't be dropped, we have to manually
+ // replace the contents of the system.users collection
+ BSONObj userMatch = BSON("user" << obj["user"].String());
+ conn().update(_curns, Query(userMatch), obj);
+ _users.erase(obj["user"].String());
+ } else {
conn().insert( _curns , obj );
}
}
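
Because system collections such as system.users cannot be dropped, the new --drop path records the users already present, replaces the ones that also appear in the dump, and finally removes whatever was never mentioned. A plain-container sketch of that bookkeeping, with the database calls stubbed out as prints:

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    std::set<std::string> existing;                  // users currently in the live DB
    existing.insert("alice"); existing.insert("bob"); existing.insert("carol");

    std::vector<std::string> dumped;                 // users present in the dump file
    dumped.push_back("alice"); dumped.push_back("dave");

    for (std::vector<std::string>::const_iterator it = dumped.begin(); it != dumped.end(); ++it) {
        std::cout << "upsert user: " << *it << std::endl;  // stands in for conn().update(...)
        existing.erase(*it);                                // mirrors _users.erase(...)
    }
    for (std::set<std::string>::const_iterator it = existing.begin(); it != existing.end(); ++it)
        std::cout << "remove user: " << *it << std::endl;   // stands in for conn().remove(...)
    return 0;
}
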
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 0422f87..aeab808 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -26,7 +26,7 @@
killcursors
*/
-
+#include "../pch.h"
#include <pcap.h>
#ifdef _WIN32
@@ -35,7 +35,7 @@
#endif
#include "../bson/util/builder.h"
-#include "../util/message.h"
+#include "../util/net/message.h"
#include "../util/mmap.h"
#include "../db/dbmessage.h"
#include "../client/dbclient.h"
@@ -69,6 +69,11 @@ using mongo::DBClientConnection;
using mongo::QueryResult;
using mongo::MemoryMappedFile;
+mongo::CmdLine mongo::cmdLine;
+namespace mongo {
+ void setupSignals( bool inFork ){}
+}
+
#define SNAP_LEN 65535
int captureHeaderSize;
@@ -99,7 +104,10 @@ struct sniff_ip {
#define IP_V(ip) (((ip)->ip_vhl) >> 4)
/* TCP header */
-typedef u_int32_t tcp_seq;
+#ifdef _WIN32
+typedef unsigned __int32 uint32_t;
+#endif
+typedef uint32_t tcp_seq;
struct sniff_tcp {
u_short th_sport; /* source port */
@@ -271,7 +279,7 @@ void processMessage( Connection& c , Message& m ) {
if ( m.operation() == mongo::opReply )
out() << " - " << (unsigned)m.header()->responseTo;
- out() << endl;
+ out() << '\n';
try {
switch( m.operation() ) {
@@ -279,14 +287,23 @@ void processMessage( Connection& c , Message& m ) {
mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
if ( r->nReturned ) {
- mongo::BSONObj o( r->data() , 0 );
+ mongo::BSONObj o( r->data() );
out() << "\t" << o << endl;
}
break;
}
case mongo::dbQuery: {
mongo::QueryMessage q(d);
- out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip << endl;
+ out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip;
+ if( !q.fields.isEmpty() )
+ out() << " hasfields";
+ if( q.queryOptions & mongo::QueryOption_SlaveOk )
+ out() << " SlaveOk";
+ if( q.queryOptions & mongo::QueryOption_NoCursorTimeout )
+ out() << " NoCursorTimeout";
+ if( q.queryOptions & ~(mongo::QueryOption_SlaveOk | mongo::QueryOption_NoCursorTimeout) )
+ out() << " queryOptions:" << hex << q.queryOptions;
+ out() << endl;
break;
}
case mongo::dbUpdate: {
@@ -323,6 +340,7 @@ void processMessage( Connection& c , Message& m ) {
break;
}
default:
+ out() << "\tunknown opcode " << m.operation() << endl;
cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
}
}
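
The dbQuery branch now decodes the option bits into readable labels and falls back to a hex dump for anything it does not name. A tiny sketch of that decoding; the flag values here are assumptions matching the client's QueryOptions enum of this era, not values taken from this diff:

#include <iostream>

int main() {
    const int QueryOption_SlaveOk         = 1 << 2;   // assumed enum values
    const int QueryOption_NoCursorTimeout = 1 << 4;

    int queryOptions = QueryOption_SlaveOk | (1 << 6);    // made-up message flags
    if (queryOptions & QueryOption_SlaveOk)         std::cout << " SlaveOk";
    if (queryOptions & QueryOption_NoCursorTimeout) std::cout << " NoCursorTimeout";
    if (queryOptions & ~(QueryOption_SlaveOk | QueryOption_NoCursorTimeout))
        std::cout << " queryOptions:" << std::hex << queryOptions;   // dump unknown bits in hex
    std::cout << std::endl;
    return 0;
}
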
diff --git a/tools/stat.cpp b/tools/stat.cpp
index fa6be31..7483222 100644
--- a/tools/stat.cpp
+++ b/tools/stat.cpp
@@ -19,14 +19,11 @@
#include "pch.h"
#include "client/dbclient.h"
#include "db/json.h"
-#include "../util/httpclient.h"
+#include "../util/net/httpclient.h"
#include "../util/text.h"
-
#include "tool.h"
-
#include <fstream>
#include <iostream>
-
#include <boost/program_options.hpp>
namespace po = boost::program_options;
@@ -65,24 +62,31 @@ namespace mongo {
virtual void printExtraHelpAfter( ostream & out ) {
out << "\n";
out << " Fields\n";
- out << " inserts \t- # of inserts per second\n";
- out << " query \t- # of queries per second\n";
- out << " update \t- # of updates per second\n";
- out << " delete \t- # of deletes per second\n";
- out << " getmore \t- # of get mores (cursor batch) per second\n";
- out << " command \t- # of commands per second\n";
- out << " flushes \t- # of fsync flushes per second\n";
- out << " mapped \t- amount of data mmaped (total data size) megabytes\n";
- out << " visze \t- virtual size of process in megabytes\n";
- out << " res \t- resident size of process in megabytes\n";
- out << " faults \t- # of pages faults per sec (linux only)\n";
- out << " locked \t- percent of time in global write lock\n";
- out << " idx miss \t- percent of btree page misses (sampled)\n";
- out << " qr|qw \t- queue lengths for clients waiting (read|write)\n";
- out << " ar|aw \t- active clients (read|write)\n";
- out << " netIn \t- network traffic in - bits\n";
- out << " netOut \t- network traffic out - bits\n";
- out << " conn \t- number of open connections\n";
+ out << " inserts \t- # of inserts per second (* means replicated op)\n";
+ out << " query \t- # of queries per second\n";
+ out << " update \t- # of updates per second\n";
+ out << " delete \t- # of deletes per second\n";
+ out << " getmore \t- # of get mores (cursor batch) per second\n";
+ out << " command \t- # of commands per second, on a slave its local|replicated\n";
+ out << " flushes \t- # of fsync flushes per second\n";
+ out << " mapped \t- amount of data mmaped (total data size) megabytes\n";
+ out << " vsize \t- virtual size of process in megabytes\n";
+ out << " res \t- resident size of process in megabytes\n";
+ out << " faults \t- # of pages faults per sec (linux only)\n";
+ out << " locked \t- percent of time in global write lock\n";
+ out << " idx miss \t- percent of btree page misses (sampled)\n";
+ out << " qr|qw \t- queue lengths for clients waiting (read|write)\n";
+ out << " ar|aw \t- active clients (read|write)\n";
+ out << " netIn \t- network traffic in - bits\n";
+ out << " netOut \t- network traffic out - bits\n";
+ out << " conn \t- number of open connections\n";
+ out << " set \t- replica set name\n";
+ out << " repl \t- replication type \n";
+ out << " \t M - master\n";
+ out << " \t SEC - secondary\n";
+ out << " \t REC - recovering\n";
+ out << " \t UNK - unknown\n";
+ out << " \t SLV - slave\n";
}
@@ -196,6 +200,8 @@ namespace mongo {
BSONObj doRow( const BSONObj& a , const BSONObj& b ) {
BSONObjBuilder result;
+ bool isMongos = b["shardCursorType"].type() == Object; // TODO: should have a better check
+
if ( a["opcounters"].isABSONObj() && b["opcounters"].isABSONObj() ) {
BSONObj ax = a["opcounters"].embeddedObject();
BSONObj bx = b["opcounters"].embeddedObject();
@@ -251,11 +257,12 @@ namespace mongo {
if ( b.getFieldDotted("mem.supported").trueValue() ) {
BSONObj bx = b["mem"].embeddedObject();
BSONObjIterator i( bx );
- _appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
+ if (!isMongos)
+ _appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
_appendMem( result , "vsize" , 6 , bx["virtual"].numberInt() );
_appendMem( result , "res" , 6 , bx["resident"].numberInt() );
- if ( _all )
+ if ( !isMongos && _all )
_appendMem( result , "non-mapped" , 6 , bx["virtual"].numberInt() - bx["mapped"].numberInt() );
}
@@ -266,8 +273,10 @@ namespace mongo {
_append( result , "faults" , 6 , (int)diff( "page_faults" , ax , bx ) );
}
- _append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
- _append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
+ if (!isMongos) {
+ _append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
+ _append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
+ }
if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ) {
int r = b.getFieldDotted( "globalLock.currentQueue.readers" ).numberInt();
@@ -320,9 +329,7 @@ namespace mongo {
_append( result , "repl" , 4 , ss.str() );
}
- else if ( b["shardCursorType"].type() == Object ) {
- // is a mongos
- // TODO: should have a better check
+ else if ( isMongos ) {
_append( result , "repl" , 4 , "RTR" );
}
@@ -353,12 +360,16 @@ namespace mongo {
}
if ( hasParam( "discover" ) ) {
- _noconnection = true;
_many = true;
}
}
int run() {
+ if ( !(_username.empty() || _password.empty()) && isMongos()) {
+ cout << "You cannot use mongostat on a mongos running with authentication enabled" << endl;
+ return -1;
+ }
+
_sleep = getParam( "sleep" , _sleep );
_all = hasParam( "all" );
if ( _many )
@@ -593,7 +604,6 @@ namespace mongo {
int runMany() {
StateMap threads;
-
{
string orig = getParam( "host" );
if ( orig == "" )
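
Every mongostat column is the difference between two serverStatus samples taken one interval apart, and "locked %" is the ratio of two such differences (lockTime over totalTime). A back-of-the-envelope sketch with made-up counter values:

#include <iostream>

int main() {
    // opcounters.insert sampled twice, one second apart
    long long prevInserts = 10450, curInserts = 10600;
    int intervalSecs = 1;                               // mongostat's sleep interval
    std::cout << "inserts/s: " << (curInserts - prevInserts) / intervalSecs << std::endl;

    // "locked %": delta of globalLock.lockTime over delta of globalLock.totalTime
    double prevTotal = 5.0e6, curTotal = 6.0e6;         // microseconds
    double prevLock  = 1.0e5, curLock  = 2.5e5;
    std::cout << "locked %: "
              << 100.0 * (curLock - prevLock) / (curTotal - prevTotal) << std::endl;
    return 0;
}
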
diff --git a/tools/tool.cpp b/tools/tool.cpp
index 54dc5df..e8c23d5 100644
--- a/tools/tool.cpp
+++ b/tools/tool.cpp
@@ -21,10 +21,11 @@
#include <iostream>
#include <boost/filesystem/operations.hpp>
-#include <pcrecpp.h>
+#include "pcrecpp.h"
#include "util/file_allocator.h"
#include "util/password.h"
+#include "util/version.h"
using namespace std;
using namespace mongo;
@@ -44,6 +45,7 @@ namespace mongo {
_options->add_options()
("help","produce help message")
("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("version", "print the program's version and exit" )
;
if ( access & REMOTE_SERVER )
@@ -51,6 +53,9 @@ namespace mongo {
("host,h",po::value<string>(), "mongo host to connect to ( <set name>/s1,s2 for sets)" )
("port",po::value<string>(), "server port. Can also use --host hostname:port" )
("ipv6", "enable IPv6 support (disabled by default)")
+#ifdef MONGO_SSL
+ ("ssl", "use all for connections")
+#endif
("username,u",po::value<string>(), "username" )
("password,p", new PasswordValue( &_password ), "password" )
@@ -63,6 +68,7 @@ namespace mongo {
"server - needs to lock the data directory, so cannot be "
"used if a mongod is currently accessing the same path" )
("directoryperdb", "if dbpath specified, each db is in a separate directory" )
+ ("journal", "enable journaling" )
;
if ( access & SPECIFY_DBCOL )
@@ -92,6 +98,12 @@ namespace mongo {
printExtraHelpAfter(out);
}
+ void Tool::printVersion(ostream &out) {
+ out << _name << " version " << mongo::versionString;
+ if (mongo::versionString[strlen(mongo::versionString)-1] == '-')
+ out << " (commit " << mongo::gitVersion() << ")";
+ out << endl;
+ }
int Tool::main( int argc , char ** argv ) {
static StaticObserver staticObserver;
@@ -146,6 +158,11 @@ namespace mongo {
return 0;
}
+ if ( _params.count( "version" ) ) {
+ printVersion(cout);
+ return 0;
+ }
+
if ( _params.count( "verbose" ) ) {
logLevel = 1;
}
@@ -156,6 +173,13 @@ namespace mongo {
}
}
+
+#ifdef MONGO_SSL
+ if (_params.count("ssl")) {
+ mongo::cmdLine.sslOnNormalPorts = true;
+ }
+#endif
+
preSetup();
bool useDirectClient = hasParam( "dbpath" );
@@ -195,6 +219,11 @@ namespace mongo {
directoryperdb = true;
}
assert( lastError.get( true ) );
+
+ if (_params.count("journal")){
+ cmdLine.dur = true;
+ }
+
Client::initThread("tools");
_conn = new DBDirectClient();
_host = "DIRECT";
@@ -212,6 +241,8 @@ namespace mongo {
}
FileAllocator::get()->start();
+
+ dur::startup();
}
if ( _params.count( "db" ) )
@@ -239,6 +270,33 @@ namespace mongo {
cerr << "assertion: " << e.toString() << endl;
ret = -1;
}
+ catch(const boost::filesystem::filesystem_error &fse) {
+ /*
+ https://jira.mongodb.org/browse/SERVER-2904
+
+ Simple tools that don't access the database, such as
+ bsondump, aren't throwing DBExceptions, but are throwing
+ boost exceptions.
+
+ The currently available set of error codes don't seem to match
+ boost documentation. boost::filesystem::not_found_error
+ (from http://www.boost.org/doc/libs/1_31_0/libs/filesystem/doc/exception.htm)
+ doesn't seem to exist in our headers. Also, fse.code() isn't
+ boost::system::errc::no_such_file_or_directory when this
+    happens, as you would expect. We also determined from
+ experimentation that the command-line argument gets turned into
+ "\\?" instead of "/?" !!!
+ */
+#if defined(_WIN32)
+ if (/*(fse.code() == boost::system::errc::no_such_file_or_directory) &&*/
+ (fse.path1() == "\\?"))
+ printHelp(cerr);
+ else
+#endif // _WIN32
+ cerr << "error: " << fse.what() << endl;
+
+ ret = -1;
+ }
if ( currentClient.get() )
currentClient->shutdown();
@@ -275,6 +333,13 @@ namespace mongo {
return true;
}
+ bool Tool::isMongos() {
+        // TODO: when mongos supports QueryOption_Exhaust add a version check (SERVER-2628)
+ BSONObj isdbgrid;
+ conn("true").simpleCommand("admin", &isdbgrid, "isdbgrid");
+ return isdbgrid["isdbgrid"].trueValue();
+ }
+
void Tool::addFieldOptions() {
add_options()
("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
@@ -332,8 +397,15 @@ namespace mongo {
if ( ! dbname.size() )
dbname = _db;
- if ( ! ( _username.size() || _password.size() ) )
+ if ( ! ( _username.size() || _password.size() ) ) {
+ // Make sure that we don't need authentication to connect to this db
+ // findOne throws an AssertionException if it's not authenticated.
+ if (_coll.size() > 0) {
+ // BSONTools don't have a collection
+ conn().findOne(getNS(), Query("{}"));
+ }
return;
+ }
string errmsg;
if ( _conn->auth( dbname , _username , _password , errmsg ) )
@@ -348,7 +420,7 @@ namespace mongo {
}
BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
- : Tool( name , access , "" , "" ) , _objcheck( objcheck ) {
+ : Tool( name , access , "" , "" , false ) , _objcheck( objcheck ) {
add_options()
("objcheck" , "validate object before inserting" )
@@ -441,9 +513,9 @@ namespace mongo {
fclose( file );
uassert( 10265 , "counts don't match" , m.done() == fileLength );
- out() << "\t " << m.hits() << " objects found" << endl;
+ (_usesstdout ? cout : cerr ) << m.hits() << " objects found" << endl;
if ( _matcher.get() )
- out() << "\t " << processed << " objects processed" << endl;
+ (_usesstdout ? cout : cerr ) << processed << " objects processed" << endl;
return processed;
}
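
printVersion treats a trailing '-' on the version string as the mark of a development build and appends the git commit only in that case. A standalone sketch of that check, using placeholder version and commit strings:

#include <cstring>
#include <iostream>

int main() {
    const char* versionString = "1.9.1-pre-";          // placeholder dev-style version
    const char* gitVersion = "0123456789abcdef";       // placeholder commit hash
    std::cout << "mongodump version " << versionString;
    if (versionString[std::strlen(versionString) - 1] == '-')
        std::cout << " (commit " << gitVersion << ")"; // only dev builds get the commit
    std::cout << std::endl;
    return 0;
}
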
diff --git a/tools/tool.h b/tools/tool.h
index f6124b8..e6694f3 100644
--- a/tools/tool.h
+++ b/tools/tool.h
@@ -28,6 +28,7 @@
#include "client/dbclient.h"
#include "db/instance.h"
+#include "db/matcher.h"
using std::string;
@@ -82,6 +83,7 @@ namespace mongo {
}
bool isMaster();
+ bool isMongos();
virtual void preSetup() {}
@@ -92,6 +94,8 @@ namespace mongo {
virtual void printExtraHelp( ostream & out ) {}
virtual void printExtraHelpAfter( ostream & out ) {}
+ virtual void printVersion(ostream &out);
+
protected:
mongo::DBClientBase &conn( bool slaveIfPaired = false );
diff --git a/tools/top.cpp b/tools/top.cpp
new file mode 100644
index 0000000..42e4568
--- /dev/null
+++ b/tools/top.cpp
@@ -0,0 +1,196 @@
+// top.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+#include "../util/text.h"
+#include "tool.h"
+#include <fstream>
+#include <iostream>
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ class TopTool : public Tool {
+ public:
+
+ TopTool() : Tool( "top" , REMOTE_SERVER , "admin" ) {
+ _sleep = 1;
+
+ add_hidden_options()
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
+ addPositionArg( "sleep" , 1 );
+
+ _autoreconnect = true;
+ }
+
+ BSONObj getData() {
+ BSONObj out;
+ if ( ! conn().simpleCommand( _db , &out , "top" ) ) {
+ cout << "error: " << out << endl;
+ return BSONObj();
+ }
+ return out.getOwned();
+ }
+
+ void printDiff( BSONObj prev , BSONObj now ) {
+ if ( ! prev["totals"].isABSONObj() ||
+ ! now["totals"].isABSONObj() ) {
+ cout << "." << endl;
+ return;
+ }
+
+ prev = prev["totals"].Obj();
+ now = now["totals"].Obj();
+
+ vector<NSInfo> data;
+
+ unsigned longest = 30;
+
+ BSONObjIterator i( now );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ // invalid, data fixed in 1.8.0
+ if ( e.fieldName()[0] == '?' )
+ continue;
+
+ if ( ! str::contains( e.fieldName() , '.' ) )
+ continue;
+
+ BSONElement old = prev[e.fieldName()];
+ if ( old.eoo() )
+ continue;
+
+ if ( strlen( e.fieldName() ) > longest )
+ longest = strlen(e.fieldName());
+
+ data.push_back( NSInfo( e.fieldName() , old.Obj() , e.Obj() ) );
+ }
+
+ std::sort( data.begin() , data.end() );
+
+ cout << "\n"
+ << setw(longest) << "ns"
+ << "\ttotal "
+ << "\tread "
+ << "\twrite "
+ << "\t\t" << terseCurrentTime()
+ << endl;
+ for ( int i=data.size()-1; i>=0 && data.size() - i < 10 ; i-- ) {
+ cout << setw(longest) << data[i].ns
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "total" ) << "ms"
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "readLock" ) << "ms"
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "writeLock" ) << "ms"
+ << endl;
+ }
+ }
+
+ int run() {
+ _sleep = getParam( "sleep" , _sleep );
+
+ BSONObj prev = getData();
+
+ while ( true ) {
+ sleepsecs( _sleep );
+
+ BSONObj now;
+ try {
+ now = getData();
+ }
+ catch ( std::exception& e ) {
+ cout << "can't get data: " << e.what() << endl;
+ continue;
+ }
+
+ if ( now.isEmpty() )
+ return -2;
+
+ try {
+ printDiff( prev , now );
+ }
+ catch ( AssertionException& e ) {
+ cout << "\nerror: " << e.what() << "\n"
+ << now
+ << endl;
+ }
+
+
+ prev = now;
+ }
+
+ return 0;
+ }
+
+ struct NSInfo {
+ NSInfo( string thens , BSONObj a , BSONObj b ) {
+ ns = thens;
+ prev = a;
+ cur = b;
+
+ timeDiff = diffTime( "total" );
+ }
+
+
+ int diffTimeMS( const char * field ) const {
+ return (int)(diffTime( field ) / 1000);
+ }
+
+ double diffTime( const char * field ) const {
+ return diff( field , "time" );
+ }
+
+ double diffCount( const char * field ) const {
+ return diff( field , "count" );
+ }
+
+ /**
+ * @param field total,readLock, etc...
+ * @param type time or count
+ */
+ double diff( const char * field , const char * type ) const {
+ return cur[field].Obj()[type].number() - prev[field].Obj()[type].number();
+ }
+
+ bool operator<(const NSInfo& r) const {
+ return timeDiff < r.timeDiff;
+ }
+
+ string ns;
+
+ BSONObj prev;
+ BSONObj cur;
+
+ double timeDiff; // time diff between prev and cur
+ };
+
+ private:
+ int _sleep;
+ };
+
+}
+
+int main( int argc , char ** argv ) {
+ mongo::TopTool top;
+ return top.main( argc , argv );
+}
+
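
NSInfo turns the cumulative per-namespace counters returned by the top command into per-interval figures: each column is current minus previous, and diffTimeMS divides by 1000 on the assumption that the server reports these times in microseconds. A sketch of that arithmetic with made-up totals:

#include <iostream>

int main() {
    // totals.<ns>.total.time from two consecutive "top" calls, in microseconds
    double prevTotalMicros = 1200000, curTotalMicros = 1450000;
    int diffTimeMS = (int)((curTotalMicros - prevTotalMicros) / 1000);  // micros -> ms
    std::cout << "test.foo\t" << diffTimeMS << "ms" << std::endl;       // prints 250ms
    return 0;
}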