unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx) {
    // clear cached things since we are changing state
    // namely what fields are indexed
    NamespaceDetailsTransient::get(ns.c_str()).addedIndex();

    unsigned long long n = 0;

    prep(ns.c_str(), d);
    try {
        idx.head.writing() = BtreeBasedBuilder::makeEmptyIndex(idx);
        n = addExistingToIndex(ns.c_str(), d, idx);
        // idx may point at an invalid index entry at this point
    }
    catch(...) {
        if( cc().database() && nsdetails(ns) == d ) {
            done(ns.c_str());
        }
        else {
            log() << "ERROR: db gone during bg index?" << endl;
        }
        throw;
    }
    done(ns.c_str());
    return n;
}
unsigned long long BackgroundIndexBuildJob::go( Collection* collection, IndexDetails& idx) {
    string ns = collection->ns().ns();

    // clear cached things since we are changing state
    // namely what fields are indexed
    collection->infoCache()->addedIndex();

    prep( ns );

    try {
        idx.head.writing() = BtreeBasedBuilder::makeEmptyIndex( idx );
        unsigned long long n = addExistingToIndex( collection, idx );
        // idx may point at an invalid index entry at this point
        done( ns );
        return n;
    }
    catch (...) {
        done( ns );
        throw;
    }
}
unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
    unsigned long long n = 0;

    prep(ns.c_str(), d);
    verify( idxNo == d->nIndexes );
    try {
        idx.head.writing() = idx.idxInterface().addBucket(idx);
        n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
    }
    catch(...) {
        if( cc().database() && nsdetails(ns.c_str()) == d ) {
            verify( idxNo == d->nIndexes );
            done(ns.c_str(), d);
        }
        else {
            log() << "ERROR: db gone during bg index?" << endl;
        }
        throw;
    }
    verify( idxNo == d->nIndexes );
    done(ns.c_str(), d);
    return n;
}
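// All three go() variants above share one shape: flag the build as in
// progress (prep), create an empty index head, fold the existing records
// into it, and clear the in-progress state (done) on both the success and
// the failure path before rethrowing. Below is a minimal, self-contained
// sketch of that cleanup-and-rethrow shape, assuming nothing beyond the
// standard library; prepBuild, finishBuild, and scanIntoIndex are
// hypothetical stand-ins for illustration, not MongoDB APIs.

#include <iostream>
#include <string>

namespace sketch {

    void prepBuild(const std::string& ns)   { std::cout << ns << ": build in progress\n"; }
    void finishBuild(const std::string& ns) { std::cout << ns << ": build finished\n"; }
    unsigned long long scanIntoIndex(const std::string&) { return 0; /* pretend scan */ }

    unsigned long long buildIndex(const std::string& ns) {
        prepBuild(ns);
        try {
            unsigned long long n = scanIntoIndex(ns);
            finishBuild(ns);   // success path clears the in-progress state
            return n;
        }
        catch (...) {
            finishBuild(ns);   // failure path must clear it too,
            throw;             // then propagate the original error
        }
    }

}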
// throws DBException
void buildAnIndex( OperationContext* txn,
                   Collection* collection,
                   IndexCatalogEntry* btreeState,
                   bool mayInterrupt ) {
    string ns = collection->ns().ns(); // our copy
    const IndexDescriptor* idx = btreeState->descriptor();
    const BSONObj& idxInfo = idx->infoObj();

    MONGO_TLOG(0) << "build index on: " << ns
                  << " properties: " << idx->toString() << endl;

    audit::logCreateIndex( currentClient.get(), &idxInfo, idx->indexName(), ns );

    Timer t;

    verify( Lock::isWriteLocked( ns ) );

    // this is so that people know there are more keys to look at when doing
    // things like in place updates, etc...
    collection->infoCache()->addedIndex();

    if ( collection->numRecords() == 0 ) {
        Status status = btreeState->accessMethod()->initializeAsEmpty(txn);
        massert( 17343,
                 str::stream() << "IndexAccessMethod::initializeAsEmpty failed" << status.toString(),
                 status.isOK() );
        MONGO_TLOG(0) << "\t added index to empty collection";
        return;
    }

    scoped_ptr<BackgroundOperation> backgroundOperation;
    bool doInBackground = false;

    if ( idxInfo["background"].trueValue() && !inDBRepair ) {
        doInBackground = true;
        backgroundOperation.reset( new BackgroundOperation(ns) );
        uassert( 13130,
                 "can't start bg index b/c in recursive lock (db.eval?)",
                 !Lock::nested() );
        log() << "\t building index in background";
    }

    Status status = btreeState->accessMethod()->initializeAsEmpty(txn);
    massert( 17342,
             str::stream() << "IndexAccessMethod::initializeAsEmpty failed" << status.toString(),
             status.isOK() );

    IndexAccessMethod* bulk = doInBackground ?
        NULL : btreeState->accessMethod()->initiateBulk(txn, collection->numRecords());
    scoped_ptr<IndexAccessMethod> bulkHolder(bulk);
    IndexAccessMethod* iam = bulk ? bulk : btreeState->accessMethod();

    if ( bulk )
        log() << "\t building index using bulk method";

    unsigned long long n = addExistingToIndex( txn,
                                               collection,
                                               btreeState->descriptor(),
                                               iam,
                                               doInBackground );

    if ( bulk ) {
        LOG(1) << "\t bulk commit starting";
        std::set<DiskLoc> dupsToDrop;

        Status status = btreeState->accessMethod()->commitBulk( bulk,
                                                                mayInterrupt,
                                                                &dupsToDrop );

        // Code above us expects a uassert in case of dupkey errors.
        if (ErrorCodes::DuplicateKey == status.code()) {
            uassertStatusOK(status);
        }

        // Any other errors are probably bad and deserve a massert.
        massert( 17398,
                 str::stream() << "commitBulk failed: " << status.toString(),
                 status.isOK() );

        if ( dupsToDrop.size() )
            log() << "\t bulk dropping " << dupsToDrop.size() << " dups";

        for( set<DiskLoc>::const_iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); ++i ) {
            BSONObj toDelete;
            collection->deleteDocument( txn,
                                        *i,
                                        false /* cappedOk */,
                                        true /* noWarn */,
                                        &toDelete );
            if (isMasterNs(ns.c_str())) {
                logOp( txn, "d", ns.c_str(), toDelete );
            }

            txn->recoveryUnit()->commitIfNeeded();

            RARELY if ( mayInterrupt ) {
                txn->checkForInterrupt();
            }
        }
    }

    verify( !btreeState->head().isNull() );

    MONGO_TLOG(0) << "build index done. scanned " << n << " total records. "
                  << t.millis() / 1000.0 << " secs" << endl;

    // this one is so people know that the index is finished
    collection->infoCache()->addedIndex();
}
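// buildAnIndex() above picks one of two strategies: a foreground build goes
// through a bulk builder (initiateBulk/commitBulk, with duplicate records
// collected into dupsToDrop and deleted afterwards), while a background
// build feeds keys through the regular access method one record at a time.
// A stripped-down sketch of that decision follows; RecordId, BulkBuilder,
// insertOneByOne, and deleteRecord are hypothetical stand-ins for
// illustration, not the real IndexAccessMethod interface.

#include <cstdint>
#include <memory>
#include <set>

namespace sketch {

    struct RecordId {
        std::uint64_t v;
        bool operator<(const RecordId& o) const { return v < o.v; }
    };

    struct BulkBuilder {
        // pretend: accumulate keys, then sort and write them in one pass,
        // reporting duplicate records instead of failing mid-build
        void addRecord(RecordId) {}
        std::set<RecordId> commit() { return std::set<RecordId>(); }
    };

    void insertOneByOne(RecordId) {}   // background path: live, yield-friendly inserts
    void deleteRecord(RecordId) {}     // drop a duplicate after a bulk commit

    void buildIndexOver(const std::set<RecordId>& records, bool background) {
        std::unique_ptr<BulkBuilder> bulk;
        if (!background)
            bulk.reset(new BulkBuilder());

        for (const RecordId& r : records) {
            if (bulk) bulk->addRecord(r);   // foreground: batch into the bulk builder
            else      insertOneByOne(r);    // background: insert as we scan
        }

        if (bulk) {
            for (const RecordId& dup : bulk->commit())
                deleteRecord(dup);          // mirrors the dupsToDrop loop above
        }
    }

}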