Diffstat (limited to 'db/index.cpp')
 db/index.cpp | 148
 1 file changed, 96 insertions(+), 52 deletions(-)
diff --git a/db/index.cpp b/db/index.cpp
index 04eca73..c696e27 100644
--- a/db/index.cpp
+++ b/db/index.cpp
@@ -17,15 +17,16 @@
*/
#include "pch.h"
-#include "namespace.h"
+#include "namespace-inl.h"
#include "index.h"
#include "btree.h"
#include "query.h"
#include "background.h"
+#include "repl/rs.h"
namespace mongo {
- int removeFromSysIndexes(const char *ns, const char *idxName) {
+ int removeFromSysIndexes(const char *ns, const char *idxName) {
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
b.append("ns", ns);
@@ -34,24 +35,36 @@ namespace mongo {
return (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
}
- /* this is just an attempt to clean up old orphaned stuff on a delete all indexes
- call. repair database is the clean solution, but this gives one a lighter weight
+ /* this is just an attempt to clean up old orphaned stuff on a delete all indexes
+ call. repair database is the clean solution, but this gives one a lighter weight
partial option. see dropIndexes()
*/
- void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
+ void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
b.append("ns", ns);
- if( idIndex ) {
+ if( idIndex ) {
b.append("name", BSON( "$ne" << idIndex->indexName().c_str() ));
}
BSONObj cond = b.done();
int n = (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
- if( n ) {
+ if( n ) {
log() << "info: assureSysIndexesEmptied cleaned up " << n << " entries" << endl;
}
}
+ int IndexDetails::keyPatternOffset( const string& key ) const {
+ BSONObjIterator i( keyPattern() );
+ int n = 0;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( key == e.fieldName() )
+ return n;
+ n++;
+ }
+ return -1;
+ }
+
const IndexSpec& IndexDetails::getSpec() const {
scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
return NamespaceDetailsTransient::get_inlock( info.obj()["ns"].valuestr() ).getIndexSpec( this );
@@ -62,29 +75,35 @@ namespace mongo {
*/
void IndexDetails::kill_idx() {
string ns = indexNamespace(); // e.g. foo.coll.$ts_1
+ try {
- string pns = parentNS(); // note we need a copy, as parentNS() won't work after the drop() below
-
- // clean up parent namespace index cache
- NamespaceDetailsTransient::get_w( pns.c_str() ).deletedIndex();
+ string pns = parentNS(); // note we need a copy, as parentNS() won't work after the drop() below
- string name = indexName();
+ // clean up parent namespace index cache
+ NamespaceDetailsTransient::get_w( pns.c_str() ).deletedIndex();
+
+ string name = indexName();
+
+ /* important to catch exception here so we can finish cleanup below. */
+ try {
+ dropNS(ns.c_str());
+ }
+ catch(DBException& ) {
+ log(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
+ }
+ head.setInvalid();
+ info.setInvalid();
+
+ // clean up in system.indexes. we do this last on purpose.
+ int n = removeFromSysIndexes(pns.c_str(), name.c_str());
+ wassert( n == 1 );
- /* important to catch exception here so we can finish cleanup below. */
- try {
- btreeStore->drop(ns.c_str());
}
- catch(DBException& ) {
- log(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
+ catch ( DBException &e ) {
+ log() << "exception in kill_idx: " << e << ", ns: " << ns << endl;
}
- head.setInvalid();
- info.setInvalid();
-
- // clean up in system.indexes. we do this last on purpose.
- int n = removeFromSysIndexes(pns.c_str(), name.c_str());
- wassert( n == 1 );
}
-
+
void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const {
getSpec().getKeys( obj, keys );
}
@@ -105,7 +124,7 @@ namespace mongo {
}
}
- void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj, bool &changedId) {
+ void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj, bool &changedId) {
int z = d.nIndexesBeingBuilt();
v.resize(z);
NamespaceDetails::IndexIterator i = d.ii();
@@ -115,7 +134,7 @@ namespace mongo {
IndexChanges& ch = v[i];
idx.getKeysFromObject(oldObj, ch.oldkeys);
idx.getKeysFromObject(newObj, ch.newkeys);
- if( ch.newkeys.size() > 1 )
+ if( ch.newkeys.size() > 1 )
d.setIndexIsMultikey(i);
setDifference(ch.oldkeys, ch.newkeys, ch.removed);
setDifference(ch.newkeys, ch.oldkeys, ch.added);
@@ -133,12 +152,12 @@ namespace mongo {
}
}
- // should be { <something> : <simpletype[1|-1]>, .keyp.. }
- static bool validKeyPattern(BSONObj kp) {
+ // should be { <something> : <simpletype[1|-1]>, .keyp.. }
+ static bool validKeyPattern(BSONObj kp) {
BSONObjIterator i(kp);
- while( i.moreWithEOO() ) {
+ while( i.moreWithEOO() ) {
BSONElement e = i.next();
- if( e.type() == Object || e.type() == Array )
+ if( e.type() == Object || e.type() == Array )
return false;
}
return true;
@@ -154,29 +173,23 @@ namespace mongo {
throws DBException
- @return
- true if ok to continue. when false we stop/fail silently (index already exists)
- sourceNS - source NS we are indexing
- sourceCollection - its details ptr
+ @param sourceNS - source NS we are indexing
+ @param sourceCollection - its details ptr
+ @return true if ok to continue. when false we stop/fail silently (index already exists)
*/
- bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection) {
+ bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject ) {
sourceCollection = 0;
// logical name of the index. todo: get rid of the name, we don't need it!
- const char *name = io.getStringField("name");
+ const char *name = io.getStringField("name");
uassert(12523, "no index name specified", *name);
// the collection for which we are building an index
- sourceNS = io.getStringField("ns");
+ sourceNS = io.getStringField("ns");
uassert(10096, "invalid ns to index", sourceNS.find( '.' ) != string::npos);
- uassert(10097, "bad table to index name on add index attempt",
- cc().database()->name == nsToDatabase(sourceNS.c_str()));
+ uassert(10097, "bad table to index name on add index attempt",
+ cc().database()->name == nsToDatabase(sourceNS.c_str()));
- /* we can't build a new index for the ns if a build is already in progress in the background -
- EVEN IF this is a foreground build.
- */
- uassert(12588, "cannot add index with a background operation in progress",
- !BackgroundOperation::inProgForNs(sourceNS.c_str()));
BSONObj key = io.getObjectField("key");
uassert(12524, "index key pattern too large", key.objsize() <= 2048);
@@ -187,7 +200,7 @@ namespace mongo {
if ( sourceNS.empty() || key.isEmpty() ) {
log(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
- sourceNS << "\n idxobj:" << io.toString() << endl;
+ sourceNS << "\n idxobj:" << io.toString() << endl;
string s = "bad add index attempt " + sourceNS + " key:" + key.toString();
uasserted(12504, s);
}
@@ -201,7 +214,7 @@ namespace mongo {
return false;
}
sourceCollection = nsdetails(sourceNS.c_str());
- tlog() << "info: creating collection " << sourceNS << " on add index\n";
+ tlog() << "info: creating collection " << sourceNS << " on add index" << endl;
assert( sourceCollection );
}
@@ -222,24 +235,55 @@ namespace mongo {
uasserted(12505,s);
}
- /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
+ /* we can't build a new index for the ns if a build is already in progress in the background -
+ EVEN IF this is a foreground build.
+ */
+ uassert(12588, "cannot add index with a background operation in progress",
+ !BackgroundOperation::inProgForNs(sourceNS.c_str()));
+
+ /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
all be treated as the same pattern.
*/
- if ( !god && IndexDetails::isIdIndexPattern(key) ) {
- ensureHaveIdIndex( sourceNS.c_str() );
- return false;
+ if ( IndexDetails::isIdIndexPattern(key) ) {
+ if( !god ) {
+ ensureHaveIdIndex( sourceNS.c_str() );
+ return false;
+ }
+ }
+ else {
+ /* is buildIndexes:false set for this replica set member?
+ if so we don't build any indexes except _id
+ */
+ if( theReplSet && !theReplSet->buildIndexes() )
+ return false;
+ }
+
+ string pluginName = IndexPlugin::findPluginName( key );
+ IndexPlugin * plugin = pluginName.size() ? IndexPlugin::get( pluginName ) : 0;
+
+ if ( plugin ) {
+ fixedIndexObject = plugin->adjustIndexSpec( io );
+ }
+ else if ( io["v"].eoo() ) {
+ // add "v" if it doesn't exist
+ // if it does - leave whatever value was there
+ // this is for testing and replication
+ BSONObjBuilder b( io.objsize() + 32 );
+ b.appendElements( io );
+ b.append( "v" , 0 );
+ fixedIndexObject = b.obj();
}
return true;
}
- void IndexSpec::reset( const IndexDetails * details ){
+ void IndexSpec::reset( const IndexDetails * details ) {
_details = details;
reset( details->info );
}
- void IndexSpec::reset( const DiskLoc& loc ){
+ void IndexSpec::reset( const DiskLoc& loc ) {
info = loc.obj();
keyPattern = info["key"].embeddedObjectUserCheck();
if ( keyPattern.objsize() == 0 ) {
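
For context, a minimal, self-contained sketch (not part of this commit) of the lookup that the newly added IndexDetails::keyPatternOffset() performs: a linear scan over the index key pattern that returns the zero-based position of a field name, or -1 when the field is absent. A std::vector<std::string> stands in here for the BSONObj key pattern, and the field names are hypothetical.

#include <iostream>
#include <string>
#include <vector>

// Mirrors the scan in IndexDetails::keyPatternOffset(): walk the key pattern
// in order and return the zero-based position of `key`, or -1 if not found.
int keyPatternOffset(const std::vector<std::string>& keyPattern, const std::string& key) {
    int n = 0;
    for (const std::string& field : keyPattern) {
        if (field == key)
            return n;
        n++;
    }
    return -1;
}

int main() {
    // Stand-in for an index key pattern such as { a : 1, b : -1 }.
    std::vector<std::string> pattern = { "a", "b" };
    std::cout << keyPatternOffset(pattern, "b") << std::endl;  // prints 1
    std::cout << keyPatternOffset(pattern, "c") << std::endl;  // prints -1
    return 0;
}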