IndexCatalogEntry* IndexCatalogEntryContainer::find( const string& name ) {
    for ( iterator i = begin(); i != end(); ++i ) {
        IndexCatalogEntry* e = *i;
        if ( e->descriptor()->indexName() == name )
            return e;
    }
    return NULL;
}
IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) {
    if ( desc->_cachedEntry )
        return desc->_cachedEntry;

    for ( iterator i = begin(); i != end(); ++i ) {
        IndexCatalogEntry* e = *i;
        if ( e->descriptor() == desc )
            return e;
    }
    return NULL;
}
IndexCatalogEntry* IndexCatalogEntryContainer::release( const IndexDescriptor* desc ) {
    for ( std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
          i != _entries.mutableVector().end();
          ++i ) {
        IndexCatalogEntry* e = *i;
        if ( e->descriptor() != desc )
            continue;
        _entries.mutableVector().erase( i );
        return e;
    }
    return NULL;
}
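// --- Usage sketch (illustrative, not part of the original source) ---
// Shows how a caller might combine find() and release(): look the entry up
// by name, then detach it from the container before destroying it. Note the
// fast path in find(const IndexDescriptor*): if the descriptor carries a
// _cachedEntry pointer, the linear scan is skipped entirely. The function
// name dropEntryByName and the final delete are assumptions for this sketch;
// release() only erases the pointer from the vector and hands ownership to
// the caller, so the caller must delete (or re-own) the entry itself.
void dropEntryByName( IndexCatalogEntryContainer& entries, const string& name ) {
    IndexCatalogEntry* e = entries.find( name );
    if ( !e )
        return;  // nothing to do; find() returns NULL on a miss
    IndexCatalogEntry* owned = entries.release( e->descriptor() );
    invariant( owned == e );  // release() returns the same entry it erased
    delete owned;             // caller now owns the raw pointer
}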
Status BtreeBasedBulkAccessMethod::commit(set<DiskLoc>* dupsToDrop, CurOp* op, bool mayInterrupt) {
    DiskLoc oldHead = _real->_btreeState->head();

    // XXX: do we expect the tree to be empty but have a head set?  Looks like so from old code.
    invariant(!oldHead.isNull());
    _real->_btreeState->setHead(_txn, DiskLoc());
    _real->_btreeState->recordStore()->deleteRecord(_txn, oldHead);

    if (_isMultiKey) {
        _real->_btreeState->setMultikey( _txn );
    }

    _sorter->sort(false);

    Timer timer;
    IndexCatalogEntry* entry = _real->_btreeState;

    bool dupsAllowed = !entry->descriptor()->unique() ||
        ignoreUniqueIndex(entry->descriptor());

    bool dropDups = entry->descriptor()->dropDups() || inDBRepair;

    scoped_ptr<BSONObjExternalSorter::Iterator> i(_sorter->iterator());

    // verifies that pm and op refer to the same ProgressMeter
    ProgressMeter& pm = op->setMessage("Index Bulk Build: (2/3) btree bottom up",
                                       "Index: (2/3) BTree Bottom Up Progress",
                                       _keysInserted,
                                       10);

    scoped_ptr<BtreeBuilderInterface> builder;
    builder.reset(_interface->getBulkBuilder(_txn, dupsAllowed));

    while (i->more()) {
        // Get the next datum and add it to the builder.
        ExternalSortDatum d = i->next();
        Status status = builder->addKey(d.first, d.second);

        if (!status.isOK()) {
            if (ErrorCodes::DuplicateKey != status.code()) {
                return status;
            }

            // If we're here it's a duplicate key.
            if (dropDups) {
                static const size_t kMaxDupsToStore = 1000000;
                dupsToDrop->insert(d.second);
                if (dupsToDrop->size() > kMaxDupsToStore) {
                    return Status(ErrorCodes::InternalError,
                                  "Too many dups on index build with dropDups = true");
                }
            }
            else if (!dupsAllowed) {
                return status;
            }
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just
        // fine.
        pm.hit();
    }

    pm.finished();

    op->setMessage("Index Bulk Build: (3/3) btree-middle",
                   "Index: (3/3) BTree Middle Progress");

    LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";

    unsigned long long keysCommit = builder->commit(mayInterrupt);

    if (!dropDups && (keysCommit != _keysInserted)) {
        warning() << "not all entries were added to the index, probably some "
                  << "keys were too large" << endl;
    }

    return Status::OK();
}
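// --- Simplified model of the bulk-build loop above (illustrative only) ---
// Everything here (KeyDatum, ToyBuilder, bulkBuild) is invented for the
// sketch; it models the control flow, not the real on-disk btree builder.
// The idea: sort all (key, location) pairs first, then feed them to the
// builder one at a time. Because the stream is sorted, duplicates are
// adjacent; each one is either recorded for later deletion (dropDups) or
// aborts the build, with the same one-million cap on remembered dups.
#include <algorithm>
#include <cstdint>
#include <set>
#include <string>
#include <vector>

struct KeyDatum { std::string key; int64_t loc; };  // stands in for (BSONObj, DiskLoc)

struct ToyBuilder {
    bool hasLast = false;
    std::string last;               // last key added, to detect dups in sorted order
    bool addKey(const KeyDatum& d) {  // returns false on a duplicate key
        if (hasLast && d.key == last)
            return false;
        hasLast = true;
        last = d.key;
        return true;
    }
};

bool bulkBuild(std::vector<KeyDatum> data, bool dropDups, std::set<int64_t>* dupsToDrop) {
    static const size_t kMaxDupsToStore = 1000000;
    // Phase 1 equivalent: the external sort (here just std::sort in memory).
    std::sort(data.begin(), data.end(),
              [](const KeyDatum& a, const KeyDatum& b) { return a.key < b.key; });
    ToyBuilder builder;
    for (const KeyDatum& d : data) {
        if (builder.addKey(d))
            continue;                  // the addKey went just fine
        if (!dropDups)
            return false;              // duplicate on a unique index: fail the build
        dupsToDrop->insert(d.loc);     // remember the document to delete later
        if (dupsToDrop->size() > kMaxDupsToStore)
            return false;              // same 1M safety cap as the real code
    }
    return true;
}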
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
                                           Collection* collection,
                                           const CompactOptions* compactOptions) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns().toString(), MODE_X));

    DisableDocumentValidation validationDisabler(opCtx);

    auto recordStore = collection->getRecordStore();
    auto indexCatalog = collection->getIndexCatalog();

    if (!recordStore->compactSupported())
        return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
                                        str::stream()
                                            << "cannot compact collection with record store: "
                                            << recordStore->name());

    if (recordStore->compactsInPlace()) {
        CompactStats stats;
        Status status = recordStore->compact(opCtx);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        // Compact all indexes (not including unfinished indexes)
        std::unique_ptr<IndexCatalog::IndexIterator> ii(
            indexCatalog->getIndexIterator(opCtx, false));
        while (ii->more()) {
            IndexCatalogEntry* entry = ii->next();
            IndexDescriptor* descriptor = entry->descriptor();
            IndexAccessMethod* iam = entry->accessMethod();

            LOG(1) << "compacting index: " << descriptor->toString();
            Status status = iam->compact(opCtx);
            if (!status.isOK()) {
                error() << "failed to compact index: " << descriptor->toString();
                return status;
            }
        }

        return StatusWith<CompactStats>(stats);
    }

    if (indexCatalog->numIndexesInProgress(opCtx))
        return StatusWith<CompactStats>(ErrorCodes::BadValue,
                                        "cannot compact when indexes in progress");

    std::vector<BSONObj> indexSpecs;
    {
        std::unique_ptr<IndexCatalog::IndexIterator> ii(
            indexCatalog->getIndexIterator(opCtx, false));
        while (ii->more()) {
            IndexDescriptor* descriptor = ii->next()->descriptor();

            // Compact always creates the new index in the foreground.
            const BSONObj spec =
                descriptor->infoObj().removeField(IndexDescriptor::kBackgroundFieldName);
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus =
                index_key_validate::validateKeyPattern(key, descriptor->version());
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot compact collection due to invalid index " << spec
                                  << ": " << keyStatus.reason() << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    // Give a chance to be interrupted *before* we drop all indexes.
    opCtx->checkForInterrupt();

    {
        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        WriteUnitOfWork wunit(opCtx);
        log() << "compact dropping indexes";
        indexCatalog->dropAllIndexes(opCtx, true);
        wunit.commit();
    }

    CompactStats stats;

    MultiIndexBlockImpl indexer(opCtx, collection);
    indexer.allowInterruption();
    indexer.ignoreUniqueConstraint();  // in compact we should be doing no checking

    Status status = indexer.init(indexSpecs).getStatus();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    status = recordStore->compact(opCtx);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    log() << "starting index commits";
    status = indexer.dumpInsertsFromBulk();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    {
        WriteUnitOfWork wunit(opCtx);
        status = indexer.commit();
        if (!status.isOK()) {
            return StatusWith<CompactStats>(status);
        }
        wunit.commit();
    }

    return StatusWith<CompactStats>(stats);
}
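// --- Caller sketch (illustrative; the lock-acquisition details are assumed) ---
// compactCollection() dasserts that the collection is already locked MODE_X,
// so a caller has to take the exclusive collection lock first. AutoGetCollection
// is used here on the assumption that it is available in this version of the
// tree; runCompact, the error handling, and the default-constructed options
// are placeholders. Note that the function above accepts compactOptions but
// the snippet never actually reads it.
Status runCompact(OperationContext* opCtx, const NamespaceString& nss) {
    AutoGetCollection autoColl(opCtx, nss, MODE_X);  // exclusive collection lock
    Collection* collection = autoColl.getCollection();
    if (!collection)
        return Status(ErrorCodes::NamespaceNotFound, "collection does not exist");

    CompactOptions options;  // defaults; unused by the snippet above
    StatusWith<CompactStats> result = compactCollection(opCtx, collection, &options);
    return result.getStatus();
}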
void commit( set<DiskLoc>* dupsToDrop, CurOp* op, bool mayInterrupt ) {
    Timer timer;

    IndexCatalogEntry* entry = _real->_btreeState;

    bool dupsAllowed = !entry->descriptor()->unique() ||
        ignoreUniqueIndex(entry->descriptor());
    bool dropDups = entry->descriptor()->dropDups() || inDBRepair;

    BtreeBuilder<V> btBuilder(dupsAllowed, entry);

    BSONObj keyLast;
    scoped_ptr<BSONObjExternalSorter::Iterator> i( _phase1.sorter->iterator() );

    // verifies that pm and op refer to the same ProgressMeter
    ProgressMeter& pm = op->setMessage("Index Bulk Build: (2/3) btree bottom up",
                                       "Index: (2/3) BTree Bottom Up Progress",
                                       _phase1.nkeys,
                                       10);

    while( i->more() ) {
        RARELY if ( mayInterrupt ) killCurrentOp.checkForInterrupt();
        ExternalSortDatum d = i->next();

        try {
            if ( !dupsAllowed && dropDups ) {
                LastError::Disabled led( lastError.get() );
                btBuilder.addKey(d.first, d.second);
            }
            else {
                btBuilder.addKey(d.first, d.second);
            }
        }
        catch( AssertionException& e ) {
            if ( dupsAllowed ) {
                // unknown exception??
                throw;
            }

            if (ErrorCodes::isInterruption(
                    DBException::convertExceptionCode(e.getCode()))) {
                killCurrentOp.checkForInterrupt();
            }

            if ( ! dropDups )
                throw;

            /* we could queue these on disk, but normally there are very few dups,
             * so instead we keep in ram and have a limit.
             */
            if ( dupsToDrop ) {
                dupsToDrop->insert(d.second);
                uassert( 10092,
                         "too many dups on index build with dropDups=true",
                         dupsToDrop->size() < 1000000 );
            }
        }
        pm.hit();
    }

    pm.finished();

    op->setMessage("Index Bulk Build: (3/3) btree-middle",
                   "Index: (3/3) BTree Middle Progress");

    LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";

    btBuilder.commit( mayInterrupt );
    if ( btBuilder.getn() != _phase1.nkeys && ! dropDups ) {
        warning() << "not all entries were added to the index, probably some "
                  << "keys were too large" << endl;
    }
}
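// --- Note on RARELY (toy reimplementation, not the real macro) ---
// This commit() is the older, exception-based variant of the bulk commit:
// BtreeBasedBulkAccessMethod::commit above reports duplicates through Status
// codes, while this version catches AssertionException. In the loop, RARELY
// guards the interrupt check so it runs on only a small sample of iterations
// instead of on every key. A minimal sketch of the idea, assuming a sampling
// period of 128 (the real macro's period and thread-safety story may differ):
inline bool rarelySketch() {
    static unsigned counter = 0;      // not thread-safe; illustration only
    return (++counter % 128) == 0;    // fires roughly once per 128 calls
}

// usage inside a hot loop, mirroring the guard above:
//     if (rarelySketch() && mayInterrupt)
//         killCurrentOp.checkForInterrupt();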