Diffstat (limited to 'db/pdfile.h')
-rw-r--r--  db/pdfile.h  236
1 file changed, 113 insertions, 123 deletions
diff --git a/db/pdfile.h b/db/pdfile.h
index d268aac..91f4877 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -29,8 +29,9 @@
#include "../util/mmap.h"
#include "diskloc.h"
#include "jsobjmanipulator.h"
-#include "namespace.h"
+#include "namespace-inl.h"
#include "client.h"
+#include "mongommf.h"
namespace mongo {
@@ -45,53 +46,60 @@ namespace mongo {
/* low level - only drops this ns */
void dropNS(const string& dropNs);
-
+
/* deletes this ns, indexes and cursors */
- void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result );
+ void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result );
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc());
-// -1 if library unavailable.
+ // -1 if library unavailable.
boost::intmax_t freeSpace( const string &path = dbpath );
+ bool isValidNS( const StringData& ns );
+
/*---------------------------------------------------------------------*/
class MongoDataFile {
friend class DataFileMgr;
friend class BasicCursor;
public:
- MongoDataFile(int fn) : fileNo(fn) { }
+ MongoDataFile(int fn) : _mb(0), fileNo(fn) { }
void open(const char *filename, int requestedDataSize = 0, bool preallocateOnly = false);
- /* allocate a new extent from this datafile.
+ /* allocate a new extent from this datafile.
@param capped - true if capped collection
@param loops is our recursion check variable - you want to pass in zero
*/
Extent* createExtent(const char *ns, int approxSize, bool capped = false, int loops = 0);
- DataFileHeader *getHeader() {
- return header;
- }
+ DataFileHeader *getHeader() { return header(); }
+
+ unsigned long long length() const { return mmf.length(); }
/* return max size an extent may be */
static int maxSize();
-
+
+ /** fsync */
void flush( bool sync );
-
+
+        /** only use for debugging */
+ Extent* debug_getExtent(DiskLoc loc) { return _getExtent( loc ); }
private:
void badOfs(int) const;
-
+ void badOfs2(int) const;
int defaultSize( const char *filename ) const;
- Extent* getExtent(DiskLoc loc);
- Extent* _getExtent(DiskLoc loc);
+ Extent* getExtent(DiskLoc loc) const;
+ Extent* _getExtent(DiskLoc loc) const;
Record* recordAt(DiskLoc dl);
Record* makeRecord(DiskLoc dl, int size);
- void grow(DiskLoc dl, int size);
+ void grow(DiskLoc dl, int size);
- MMF mmf;
- MMF::Pointer _p;
- DataFileHeader *header;
+ char* p() const { return (char *) _mb; }
+ DataFileHeader* header() { return (DataFileHeader*) _mb; }
+
+ MongoMMF mmf;
+ void *_mb; // the memory mapped view
int fileNo;
};
@@ -110,9 +118,9 @@ namespace mongo {
NamespaceDetails *d,
NamespaceDetailsTransient *nsdt,
Record *toupdate, const DiskLoc& dl,
- const char *buf, int len, OpDebug& debug, bool &changedId, bool god=false);
+ const char *buf, int len, OpDebug& debug, bool god=false);
- // The object o may be updated if modified on insert.
+ // The object o may be updated if modified on insert.
void insertAndLog( const char *ns, const BSONObj &o, bool god = false );
/** @param obj both and in and out param -- insert can sometimes modify an object (such as add _id). */
@@ -122,7 +130,6 @@ namespace mongo {
void insertNoReturnVal(const char *ns, BSONObj o, bool god = false);
DiskLoc insert(const char *ns, const void *buf, int len, bool god = false, const BSONElement &writeId = BSONElement(), bool mayAddIndex = true);
- void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false);
static shared_ptr<Cursor> findAll(const char *ns, const DiskLoc &startLoc = DiskLoc());
/* special version of insert for transaction logging -- streamlined a bit.
@@ -134,9 +141,10 @@ namespace mongo {
static Extent* getExtent(const DiskLoc& dl);
static Record* getRecord(const DiskLoc& dl);
static DeletedRecord* makeDeletedRecord(const DiskLoc& dl, int len);
- static void grow(const DiskLoc& dl, int len);
- /* does not clean up indexes, etc. : just deletes the record in the pdfile. */
+ void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false);
+
+ /* does not clean up indexes, etc. : just deletes the record in the pdfile. use deleteRecord() to unindex */
void _deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl);
private:
@@ -175,7 +183,10 @@ namespace mongo {
int extentOfs;
int nextOfs;
int prevOfs;
+
+        /** be careful when referencing this; make sure your write intent covers it */
char data[4];
+
int netLength() {
return lengthWithHeaders - HeaderSize;
}
@@ -192,6 +203,12 @@ namespace mongo {
/* get the next record in the namespace, traversing extents as necessary */
DiskLoc getNext(const DiskLoc& myLoc);
DiskLoc getPrev(const DiskLoc& myLoc);
+
+ struct NP {
+ int nextOfs;
+ int prevOfs;
+ };
+ NP* np() { return (NP*) &nextOfs; }
};
/* extents are datafile regions where all the records within the region
@@ -206,13 +223,14 @@ namespace mongo {
DiskLoc myLoc;
DiskLoc xnext, xprev; /* next/prev extent for this namespace */
- /* which namespace this extent is for. this is just for troubleshooting really
+ /* which namespace this extent is for. this is just for troubleshooting really
and won't even be correct if the collection were renamed!
*/
- Namespace nsDiagnostic;
+ Namespace nsDiagnostic;
int length; /* size of the extent, including these fields */
- DiskLoc firstRecord, lastRecord;
+ DiskLoc firstRecord;
+ DiskLoc lastRecord;
char _extentData[4];
static int HeaderSize() { return sizeof(Extent)-4; }
@@ -224,7 +242,7 @@ namespace mongo {
void dump(iostream& s) {
s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
- s << " nsdiag:" << nsDiagnostic.buf << '\n';
+ s << " nsdiag:" << nsDiagnostic.toString() << '\n';
s << " size:" << length << " firstRecord:" << firstRecord.toString() << " lastRecord:" << lastRecord.toString() << '\n';
}
@@ -237,9 +255,8 @@ namespace mongo {
/* like init(), but for a reuse case */
DiskLoc reuse(const char *nsname);
- void assertOk() {
- assert(magic == 0x41424344);
- }
+ bool isOk() const { return magic == 0x41424344; }
+ void assertOk() const { assert(isOk()); }
Record* newRecord(int len);
@@ -251,19 +268,38 @@ namespace mongo {
return (Record *) (((char *) this) + x);
}
- Extent* getNextExtent() {
- return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext);
- }
- Extent* getPrevExtent() {
- return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev);
- }
-
+ Extent* getNextExtent() { return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext); }
+ Extent* getPrevExtent() { return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev); }
+
static int maxSize();
+ static int minSize() { return 0x100; }
+ /**
+         * @param len length of the record we need
+         * @param lastExtentLen size of the last extent, which is a factor in the next extent size
+ */
+ static int followupSize(int len, int lastExtentLen);
+
+ /**
+         * @param len length of the record we need
+ */
+ static int initialSize(int len);
+
+ struct FL {
+ DiskLoc firstRecord;
+ DiskLoc lastRecord;
+ };
+ /** often we want to update just the firstRecord and lastRecord fields.
+            this helper is for that -- for use with the getDur().writing() method
+ */
+ FL* fl() { return (FL*) &firstRecord; }
+ private:
+ DiskLoc _reuse(const char *nsname);
};
- /*
+ /* a datafile - i.e. the "dbname.<#>" files :
+
----------------------
- Header
+ DataFileHeader
----------------------
Extent (for a particular namespace)
Record
@@ -273,7 +309,6 @@ namespace mongo {
more Extents...
----------------------
*/
-
class DataFileHeader {
public:
int version;
@@ -287,35 +322,27 @@ namespace mongo {
enum { HeaderSize = 8192 };
- bool currentVersion() const {
- return ( version == VERSION ) && ( versionMinor == VERSION_MINOR );
- }
-
- bool uninitialized() const {
- if ( version == 0 ) return true;
- return false;
- }
+ bool isCurrentVersion() const { return ( version == VERSION ) && ( versionMinor == VERSION_MINOR ); }
- /*Record* __getRecord(DiskLoc dl) {
- int ofs = dl.getOfs();
- assert( ofs >= HeaderSize );
- return (Record*) (((char *) this) + ofs);
- }*/
+ bool uninitialized() const { return version == 0; }
- void init(int fileno, int filelength) {
+ void init(int fileno, int filelength, const char* filename) {
if ( uninitialized() ) {
- assert(filelength > 32768 );
+ if( !(filelength > 32768 ) ) {
+ massert(13640, str::stream() << "DataFileHeader looks corrupt at file open filelength:" << filelength << " fileno:" << fileno, false);
+ }
+ getDur().createdFile(filename, filelength);
assert( HeaderSize == 8192 );
- fileLength = filelength;
- version = VERSION;
- versionMinor = VERSION_MINOR;
- unused.setOfs( fileno, HeaderSize );
+ DataFileHeader *h = getDur().writing(this);
+ h->fileLength = filelength;
+ h->version = VERSION;
+ h->versionMinor = VERSION_MINOR;
+ h->unused.set( fileno, HeaderSize );
assert( (data-(char*)this) == HeaderSize );
- unusedLength = fileLength - HeaderSize - 16;
- //memcpy(data+unusedLength, " \nthe end\n", 16);
+ h->unusedLength = fileLength - HeaderSize - 16;
}
}
-
+
bool isEmpty() const {
return uninitialized() || ( unusedLength == fileLength - HeaderSize - 16 );
}
@@ -323,13 +350,13 @@ namespace mongo {
#pragma pack()
- inline Extent* MongoDataFile::_getExtent(DiskLoc loc) {
+ inline Extent* MongoDataFile::_getExtent(DiskLoc loc) const {
loc.assertOk();
- Extent *e = (Extent *) _p.at(loc.getOfs(), Extent::HeaderSize());
+ Extent *e = (Extent *) (p()+loc.getOfs());
return e;
}
- inline Extent* MongoDataFile::getExtent(DiskLoc loc) {
+ inline Extent* MongoDataFile::getExtent(DiskLoc loc) const {
Extent *e = _getExtent(loc);
e->assertOk();
return e;
@@ -344,18 +371,13 @@ namespace mongo {
inline Record* MongoDataFile::recordAt(DiskLoc dl) {
int ofs = dl.getOfs();
if( ofs < DataFileHeader::HeaderSize ) badOfs(ofs); // will uassert - external call to keep out of the normal code path
- return (Record*) _p.at(ofs, -1);
+ return (Record*) (p()+ofs);
}
- inline void MongoDataFile::grow(DiskLoc dl, int size) {
- int ofs = dl.getOfs();
- _p.grow(ofs, size);
- }
-
- inline Record* MongoDataFile::makeRecord(DiskLoc dl, int size) {
+ inline Record* MongoDataFile::makeRecord(DiskLoc dl, int size) {
int ofs = dl.getOfs();
- assert( ofs >= DataFileHeader::HeaderSize );
- return (Record*) _p.at(ofs, size);
+ if( ofs < DataFileHeader::HeaderSize ) badOfs(ofs); // will uassert - external call to keep out of the normal code path
+ return (Record*) (p()+ofs);
}
inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
@@ -395,50 +417,23 @@ namespace mongo {
return BSONObj(rec());
}
inline DeletedRecord* DiskLoc::drec() const {
- assert( fileNo != -1 );
+ assert( _a != -1 );
return (DeletedRecord*) rec();
}
inline Extent* DiskLoc::ext() const {
return DataFileMgr::getExtent(*this);
}
-
- /*---------------------------------------------------------------------*/
+ inline const BtreeBucket* DiskLoc::btree() const {
+ assert( _a != -1 );
+ return (const BtreeBucket *) rec()->data;
+ }
} // namespace mongo
-#include "rec.h"
#include "database.h"
namespace mongo {
- // Heritable class to implement an operation that may be applied to all
- // files in a database using _applyOpToDataFiles()
- class FileOp {
- public:
- virtual ~FileOp() {}
- // Return true if file exists and operation successful
- virtual bool apply( const boost::filesystem::path &p ) = 0;
- virtual const char * op() const = 0;
- };
-
- void _applyOpToDataFiles( const char *database, FileOp &fo, bool afterAllocator = false, const string& path = dbpath );
-
- inline void _deleteDataFiles(const char *database) {
- if ( directoryperdb ) {
- BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( boost::filesystem::path( dbpath ) / database ) );
- return;
- }
- class : public FileOp {
- virtual bool apply( const boost::filesystem::path &p ) {
- return boost::filesystem::remove( p );
- }
- virtual const char * op() const {
- return "remove";
- }
- } deleter;
- _applyOpToDataFiles( database, deleter, true );
- }
-
boost::intmax_t dbSize( const char *database );
inline NamespaceIndex* nsindex(const char *ns) {
@@ -462,11 +457,6 @@ namespace mongo {
return nsindex(ns)->details(ns);
}
- inline MongoDataFile& DiskLoc::pdf() const {
- assert( fileNo != -1 );
- return *cc().database()->getFile(fileNo);
- }
-
inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
assert( dl.a() != -1 );
return cc().database()->getFile(dl.a())->getExtent(dl);
@@ -477,30 +467,30 @@ namespace mongo {
return cc().database()->getFile(dl.a())->recordAt(dl);
}
- BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
-
- inline void DataFileMgr::grow(const DiskLoc& dl, int len) {
- assert( dl.a() != -1 );
- cc().database()->getFile(dl.a())->grow(dl, len);
- }
+ BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
- inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
+ inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
assert( dl.a() != -1 );
return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, sizeof(DeletedRecord));
}
-
+
void ensureHaveIdIndex(const char *ns);
-
+
bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
/**
- * @return true if ns is ok
+     * @return true if ns is 'normal'. '$' is used for collections holding index data, which do not contain BSON objects in their records.
+ * special case for the local.oplog.$main ns -- naming it as such was a mistake.
*/
- inline bool nsDollarCheck( const char* ns ){
+ inline bool isANormalNSName( const char* ns ) {
if ( strchr( ns , '$' ) == 0 )
return true;
-
return strcmp( ns, "local.oplog.$main" ) == 0;
}
+
+ inline BSONObj::BSONObj(const Record *r) {
+ init(r->data, false);
+ }
+
} // namespace mongo
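
The new Extent::fl() and Record::np() helpers in this patch exist so that a single getDur().writing() write-intent declaration can cover two adjacent fields at once. The following minimal standalone sketch shows that aliasing pattern in isolation; DiskLocStub, ExtentStub, and the writing() function are simplified stand-ins for illustration only, not the mongo types or durability API.

    #include <cassert>

    // stand-in for DiskLoc; in the real header this is a (file, offset) pair
    struct DiskLocStub { int a; int ofs; };

    struct ExtentStub {
        int length;                    // some field preceding the aliased pair
        DiskLocStub firstRecord;
        DiskLocStub lastRecord;

        // mirrors Extent::FL -- groups the two adjacent DiskLoc fields
        struct FL {
            DiskLocStub firstRecord;
            DiskLocStub lastRecord;
        };
        // mirrors Extent::fl(): alias &firstRecord so one write intent spans both fields
        FL* fl() { return (FL*) &firstRecord; }
    };

    // placeholder for getDur().writing(): the real call records write intent for the
    // journal and returns a pointer through which the caller may modify the region
    template <typename T> T* writing(T* p) { return p; }

    int main() {
        ExtentStub e = ExtentStub();
        ExtentStub::FL* f = writing(e.fl());   // one intent covers firstRecord and lastRecord
        f->firstRecord.a = 0;
        f->firstRecord.ofs = 8192;
        f->lastRecord = f->firstRecord;
        assert(e.firstRecord.ofs == 8192 && e.lastRecord.ofs == 8192);
        return 0;
    }

Record::np() follows the same idea for the adjacent nextOfs/prevOfs pair, and DataFileHeader::init() above uses the same writing() pattern on the whole header before filling in its fields.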