void run() {
    OperationContextImpl txn;
    Client::WriteContext ctx(&txn, _ns);

    int numFinishedIndexesStart = _catalog->numIndexesReady(&txn);

    Helpers::ensureIndex(&txn, _coll, BSON("x" << 1), false, "_x_0");
    Helpers::ensureIndex(&txn, _coll, BSON("y" << 1), false, "_y_0");

    ASSERT_TRUE(_catalog->numIndexesReady(&txn) == numFinishedIndexesStart+2);

    IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(&txn,false);
    int indexesIterated = 0;
    bool foundIndex = false;
    while (ii.more()) {
        IndexDescriptor* indexDesc = ii.next();
        indexesIterated++;
        BSONObjIterator boit(indexDesc->infoObj());
        while (boit.more() && !foundIndex) {
            BSONElement e = boit.next();
            if (str::equals(e.fieldName(), "name") &&
                str::equals(e.valuestrsafe(), "_y_0")) {
                foundIndex = true;
                break;
            }
        }
    }

    ctx.commit();
    ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady(&txn));
    ASSERT_TRUE(foundIndex);
}
void CollectionInfoCache::computeIndexKeys() {
    DEV Lock::assertWriteLocked( _collection->ns().ns() );

    _indexedPaths.clear();

    IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(true);
    while (i.more()) {
        IndexDescriptor* descriptor = i.next();

        if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
            BSONObj key = descriptor->keyPattern();
            BSONObjIterator j(key);
            while (j.more()) {
                BSONElement e = j.next();
                _indexedPaths.addPath(e.fieldName());
            }
        }
        else {
            fts::FTSSpec ftsSpec(descriptor->infoObj());

            if (ftsSpec.wildcard()) {
                _indexedPaths.allPathsIndexed();
            }
            else {
                for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
                    _indexedPaths.addPath(ftsSpec.extraBefore(i));
                }
                for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
                     it != ftsSpec.weights().end();
                     ++it) {
                    _indexedPaths.addPath(it->first);
                }
                for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
                    _indexedPaths.addPath(ftsSpec.extraAfter(i));
                }
                // Any update to a path containing "language" as a component could change the
                // language of a subdocument. Add the override field as a path component.
                _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
            }
        }
    }

    _keysComputed = true;
}
void run() {
    Client::WriteContext ctx(_ns);

    int numFinishedIndexesStart = _catalog->numIndexesReady();

    BSONObjBuilder b1;
    b1.append("key", BSON("x" << 1));
    b1.append("ns", _ns);
    b1.append("name", "_x_0");
    _catalog->createIndex(b1.obj(), true);

    BSONObjBuilder b2;
    b2.append("key", BSON("y" << 1));
    b2.append("ns", _ns);
    b2.append("name", "_y_0");
    _catalog->createIndex(b2.obj(), true);

    ASSERT_TRUE(_catalog->numIndexesReady() == numFinishedIndexesStart+2);

    IndexCatalog::IndexIterator ii = _catalog->getIndexIterator(false);
    int indexesIterated = 0;
    bool foundIndex = false;
    while (ii.more()) {
        IndexDescriptor* indexDesc = ii.next();
        indexesIterated++;
        BSONObjIterator boit(indexDesc->infoObj());
        while (boit.more() && !foundIndex) {
            BSONElement e = boit.next();
            if (str::equals(e.fieldName(), "name") &&
                str::equals(e.valuestrsafe(), "_y_0")) {
                foundIndex = true;
                break;
            }
        }
    }

    ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady());
    ASSERT_TRUE(foundIndex);
}
StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
    if ( isCapped() )
        return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                         "cannot compact capped collection" );

    if ( _indexCatalog.numIndexesInProgress() )
        return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                         "cannot compact when indexes in progress" );

    NamespaceDetails* d = details();

    // this is a big job, so might as well make things tidy before we start just to be nice.
    getDur().commitIfNeeded();

    list<DiskLoc> extents;
    for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
        extents.push_back(L);
    log() << "compact " << extents.size() << " extents" << endl;

    // same data, but might perform a little different after compact?
    _infoCache.reset();

    vector<BSONObj> indexSpecs;
    {
        IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
        while ( ii.more() ) {
            IndexDescriptor* descriptor = ii.next();

            const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot rebuild index " << spec << ": "
                                  << keyStatus.reason()
                                  << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    log() << "compact orphan deleted lists" << endl;
    d->orphanDeletedList();

    // Start over from scratch with our extent sizing and growth
    d->setLastExtentSize( 0 );

    // before dropping indexes, at least make sure we can allocate one extent!
    // this will allocate an extent and add to free list
    // if it cannot, it will throw an exception
    increaseStorageSize( _details->lastExtentSize(), true );

    // note that the drop indexes call also invalidates all clientcursors for the namespace,
    // which is important and wanted here
    log() << "compact dropping indexes" << endl;
    Status status = _indexCatalog.dropAllIndexes( true );
    if ( !status.isOK() ) {
        return StatusWith<CompactStats>( status );
    }

    getDur().commitIfNeeded();
    killCurrentOp.checkForInterrupt();

    CompactStats stats;

    MultiIndexBlock multiIndexBlock( this );
    status = multiIndexBlock.init( indexSpecs );
    if ( !status.isOK() )
        return StatusWith<CompactStats>( status );

    // reset data size and record counts to 0 for this namespace
    // as we're about to tally them up again for each new extent
    d->setStats( 0, 0 );

    ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
                                                    "Extent Compacting Progress",
                                                    extents.size()));

    int extentNumber = 0;
    for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
        _compactExtent(*i, extentNumber++, multiIndexBlock, compactOptions, &stats );
        pm.hit();
    }

    verify( d->firstExtent().ext()->xprev.isNull() );

    // indexes will do their own progress meter?
    pm.finished();

    log() << "starting index commits";
    status = multiIndexBlock.commit();
    if ( !status.isOK() )
        return StatusWith<CompactStats>( status );

    return StatusWith<CompactStats>( stats );
}
StatusWith<CompactStats> Collection::compact( OperationContext* txn,
                                              const CompactOptions* compactOptions ) {
    if ( !_recordStore->compactSupported() )
        return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                         str::stream() <<
                                         "cannot compact collection with record store: " <<
                                         _recordStore->name() );

    if ( _indexCatalog.numIndexesInProgress() )
        return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                         "cannot compact when indexes in progress" );

    // same data, but might perform a little different after compact?
    _infoCache.reset();

    vector<BSONObj> indexSpecs;
    {
        IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
        while ( ii.more() ) {
            IndexDescriptor* descriptor = ii.next();

            const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot compact collection due to invalid index "
                                  << spec << ": " << keyStatus.reason()
                                  << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    // note that the drop indexes call also invalidates all clientcursors for the namespace,
    // which is important and wanted here
    log() << "compact dropping indexes" << endl;
    Status status = _indexCatalog.dropAllIndexes(txn, true);
    if ( !status.isOK() ) {
        return StatusWith<CompactStats>( status );
    }

    txn->checkForInterrupt();

    CompactStats stats;

    MultiIndexBlock multiIndexBlock(txn, this);
    status = multiIndexBlock.init( indexSpecs );
    if ( !status.isOK() )
        return StatusWith<CompactStats>( status );

    MyCompactAdaptor adaptor(this, &multiIndexBlock);

    _recordStore->compact( txn, &adaptor, compactOptions, &stats );

    log() << "starting index commits";
    status = multiIndexBlock.commit();
    if ( !status.isOK() )
        return StatusWith<CompactStats>( status );

    return StatusWith<CompactStats>( stats );
}
StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {
    if ( isCapped() )
        return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                         "cannot compact capped collection" );

    if ( _indexCatalog.numIndexesInProgress() )
        return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                         "cannot compact when indexes in progress" );

    NamespaceDetails* d = details();

    // this is a big job, so might as well make things tidy before we start just to be nice.
    getDur().commitIfNeeded();

    list<DiskLoc> extents;
    for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
        extents.push_back(L);
    log() << "compact " << extents.size() << " extents" << endl;

    // same data, but might perform a little different after compact?
    _infoCache.reset();

    vector<BSONObj> indexSpecs;
    {
        IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
        while ( ii.more() ) {
            IndexDescriptor* descriptor = ii.next();
            indexSpecs.push_back( _compactAdjustIndexSpec( descriptor->infoObj() ) );
        }
    }

    log() << "compact orphan deleted lists" << endl;
    d->orphanDeletedList();

    // Start over from scratch with our extent sizing and growth
    d->setLastExtentSize( 0 );

    // before dropping indexes, at least make sure we can allocate one extent!
    if ( allocateSpaceForANewRecord( _ns.ns().c_str(), d, Record::HeaderSize+1, false).isNull() ) {
        return StatusWith<CompactStats>( ErrorCodes::InternalError,
                                         "compact error no space available to allocate" );
    }

    // note that the drop indexes call also invalidates all clientcursors for the namespace,
    // which is important and wanted here
    log() << "compact dropping indexes" << endl;
    Status status = _indexCatalog.dropAllIndexes( true );
    if ( !status.isOK() ) {
        return StatusWith<CompactStats>( status );
    }

    getDur().commitIfNeeded();

    CompactStats stats;

    OwnedPointerVector<IndexCatalog::IndexBuildBlock> indexBuildBlocks;
    vector<IndexAccessMethod*> indexesToInsertTo;
    vector< std::pair<IndexAccessMethod*,IndexAccessMethod*> > bulkToCommit;

    for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
        killCurrentOp.checkForInterrupt(false);
        BSONObj info = indexSpecs[i];
        info = _compactAdjustIndexSpec( info );
        info = _indexCatalog.fixIndexSpec( info );
        auto_ptr<IndexCatalog::IndexBuildBlock> block( new IndexCatalog::IndexBuildBlock( this,info ) );
        Status status = block->init();
        if ( !status.isOK() )
            return StatusWith<CompactStats>(status);

        IndexAccessMethod* accessMethod = block->getEntry()->accessMethod();
        status = accessMethod->initializeAsEmpty();
        if ( !status.isOK() )
            return StatusWith<CompactStats>(status);

        IndexAccessMethod* bulk = accessMethod->initiateBulk();
        if ( bulk ) {
            indexesToInsertTo.push_back( bulk );
            bulkToCommit.push_back( std::pair<IndexAccessMethod*,IndexAccessMethod*>( accessMethod, bulk ) );
        }
        else {
            indexesToInsertTo.push_back( accessMethod );
        }

        indexBuildBlocks.mutableVector().push_back( block.release() );
    }

    // reset data size and record counts to 0 for this namespace
    // as we're about to tally them up again for each new extent
    d->setStats( 0, 0 );

    ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
                                                    "Extent Compacting Progress",
                                                    extents.size()));

    int extentNumber = 0;
    for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
        _compactExtent(*i, extentNumber++, indexesToInsertTo, compactOptions, &stats );
        pm.hit();
    }

    verify( d->firstExtent().ext()->xprev.isNull() );

    // indexes will do their own progress meter?
    pm.finished();

    log() << "starting index commits";

    for ( size_t i = 0; i < bulkToCommit.size(); i++ ) {
        bulkToCommit[i].first->commitBulk( bulkToCommit[i].second, false, NULL );
    }

    for ( size_t i = 0; i < indexBuildBlocks.size(); i++ ) {
        IndexCatalog::IndexBuildBlock* block = indexBuildBlocks.mutableVector()[i];
        block->success();
    }

    return StatusWith<CompactStats>( stats );
}
Status MMAPV1Engine::repairDatabase( OperationContext* txn,
                                     const std::string& dbName,
                                     bool preserveClonedFilesOnFailure,
                                     bool backupOriginalFiles ) {
    unique_ptr<RepairFileDeleter> repairFileDeleter;

    // Must be done before and after repair
    getDur().syncDataAndTruncateJournal(txn);

    intmax_t totalSize = dbSize( dbName );
    intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);

    if ( freeSize > -1 && freeSize < totalSize ) {
        return Status( ErrorCodes::OutOfDiskSpace,
                       str::stream() << "Cannot repair database " << dbName
                                     << " having size: " << totalSize
                                     << " (bytes) because free disk space is: "
                                     << freeSize << " (bytes)" );
    }

    txn->checkForInterrupt();

    Path reservedPath =
        uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles )
                            ? "backup" : "_tmp" );
    bool created = false;
    MONGO_ASSERT_ON_EXCEPTION( created = boost::filesystem::create_directory( reservedPath ) );
    invariant( created );
    string reservedPathString = reservedPath.string();

    if ( !preserveClonedFilesOnFailure )
        repairFileDeleter.reset( new RepairFileDeleter( txn,
                                                        dbName,
                                                        reservedPathString,
                                                        reservedPath ) );

    {
        Database* originalDatabase = dbHolder().openDb(txn, dbName);
        if (originalDatabase == NULL) {
            return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
        }

        unique_ptr<MMAPV1DatabaseCatalogEntry> dbEntry;
        unique_ptr<Database> tempDatabase;

        // Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
        ON_BLOCK_EXIT(&dur::DurableInterface::syncDataAndTruncateJournal, &getDur(), txn);

        {
            dbEntry.reset(new MMAPV1DatabaseCatalogEntry(txn,
                                                         dbName,
                                                         reservedPathString,
                                                         storageGlobalParams.directoryperdb,
                                                         true));
            tempDatabase.reset( new Database(txn, dbName, dbEntry.get()));
        }

        map<string,CollectionOptions> namespacesToCopy;
        {
            string ns = dbName + ".system.namespaces";
            OldClientContext ctx(txn, ns );
            Collection* coll = originalDatabase->getCollection( ns );
            if ( coll ) {
                auto cursor = coll->getCursor(txn);
                while (auto record = cursor->next()) {
                    BSONObj obj = record->data.releaseToBson();

                    string ns = obj["name"].String();

                    NamespaceString nss( ns );
                    if ( nss.isSystem() ) {
                        if ( nss.isSystemDotIndexes() )
                            continue;
                        if ( nss.coll() == "system.namespaces" )
                            continue;
                    }

                    if ( !nss.isNormal() )
                        continue;

                    CollectionOptions options;
                    if ( obj["options"].isABSONObj() ) {
                        Status status = options.parse( obj["options"].Obj() );
                        if ( !status.isOK() )
                            return status;
                    }
                    namespacesToCopy[ns] = options;
                }
            }
        }

        for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
              i != namespacesToCopy.end();
              ++i ) {
            string ns = i->first;
            CollectionOptions options = i->second;

            Collection* tempCollection = NULL;
            {
                WriteUnitOfWork wunit(txn);
                tempCollection = tempDatabase->createCollection(txn, ns, options, false);
                wunit.commit();
            }

            OldClientContext readContext(txn, ns, originalDatabase);
            Collection* originalCollection = originalDatabase->getCollection( ns );
            invariant( originalCollection );

            // data

            // TODO SERVER-14812 add a mode that drops duplicates rather than failing
            MultiIndexBlock indexer(txn, tempCollection );
            {
                vector<BSONObj> indexes;
                IndexCatalog::IndexIterator ii =
                    originalCollection->getIndexCatalog()->getIndexIterator( txn, false );
                while ( ii.more() ) {
                    IndexDescriptor* desc = ii.next();
                    indexes.push_back( desc->infoObj() );
                }

                Status status = indexer.init( indexes );
                if (!status.isOK()) {
                    return status;
                }
            }

            auto cursor = originalCollection->getCursor(txn);
            while (auto record = cursor->next()) {
                BSONObj doc = record->data.releaseToBson();

                WriteUnitOfWork wunit(txn);
                StatusWith<RecordId> result = tempCollection->insertDocument(txn, doc, &indexer, false);
                if ( !result.isOK() )
                    return result.getStatus();

                wunit.commit();
                txn->checkForInterrupt();
            }

            Status status = indexer.doneInserting();
            if (!status.isOK())
                return status;

            {
                WriteUnitOfWork wunit(txn);
                indexer.commit();
                wunit.commit();
            }
        }

        getDur().syncDataAndTruncateJournal(txn);

        // need both in case journaling is disabled
        MongoFile::flushAll(true);

        txn->checkForInterrupt();
    }

    // at this point if we abort, we don't want to delete new files
    // as they might be the only copies

    if ( repairFileDeleter.get() )
        repairFileDeleter->success();

    // Close the database so we can rename/delete the original data files
    dbHolder().close(txn, dbName);

    if ( backupOriginalFiles ) {
        _renameForBackup( dbName, reservedPath );
    }
    else {
        // first make new directory before deleting data
        Path newDir = Path(storageGlobalParams.dbpath) / dbName;
        MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));

        // this deletes old files
        _deleteDataFiles( dbName );

        if ( !boost::filesystem::exists(newDir) ) {
            // we deleted because of directoryperdb
            // re-create
            MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
        }
    }

    _replaceWithRecovered( dbName, reservedPathString.c_str() );

    if (!backupOriginalFiles) {
        MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(reservedPath));
    }

    // Reopen the database so it's discoverable
    dbHolder().openDb(txn, dbName);

    return Status::OK();
}
Status MMAPV1Engine::repairDatabase( OperationContext* txn,
                                     const std::string& dbName,
                                     bool preserveClonedFilesOnFailure,
                                     bool backupOriginalFiles ) {
    // We must hold some form of lock here
    invariant(txn->lockState()->threadState());
    invariant( dbName.find( '.' ) == string::npos );

    scoped_ptr<RepairFileDeleter> repairFileDeleter;

    log() << "repairDatabase " << dbName << endl;

    BackgroundOperation::assertNoBgOpInProgForDb(dbName);

    txn->recoveryUnit()->syncDataAndTruncateJournal(); // Must be done before and after repair

    intmax_t totalSize = dbSize( dbName );
    intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);

    if ( freeSize > -1 && freeSize < totalSize ) {
        return Status( ErrorCodes::OutOfDiskSpace,
                       str::stream() << "Cannot repair database " << dbName
                                     << " having size: " << totalSize
                                     << " (bytes) because free disk space is: "
                                     << freeSize << " (bytes)" );
    }

    txn->checkForInterrupt();

    Path reservedPath =
        uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles )
                            ? "backup" : "_tmp" );
    MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
    string reservedPathString = reservedPath.string();

    if ( !preserveClonedFilesOnFailure )
        repairFileDeleter.reset( new RepairFileDeleter( txn,
                                                        dbName,
                                                        reservedPathString,
                                                        reservedPath ) );

    {
        Database* originalDatabase = dbHolder().get(txn, dbName);
        if (originalDatabase == NULL) {
            return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
        }

        scoped_ptr<MMAPV1DatabaseCatalogEntry> dbEntry;
        scoped_ptr<Database> tempDatabase;
        {
            dbEntry.reset( new MMAPV1DatabaseCatalogEntry( txn,
                                                           dbName,
                                                           reservedPathString,
                                                           storageGlobalParams.directoryperdb,
                                                           true ) );
            invariant( !dbEntry->exists() );
            tempDatabase.reset( new Database( txn, dbName, dbEntry.get() ) );
        }

        map<string,CollectionOptions> namespacesToCopy;
        {
            string ns = dbName + ".system.namespaces";
            Client::Context ctx(txn, ns );
            Collection* coll = originalDatabase->getCollection( txn, ns );
            if ( coll ) {
                scoped_ptr<RecordIterator> it( coll->getIterator( txn,
                                                                  DiskLoc(),
                                                                  false,
                                                                  CollectionScanParams::FORWARD ) );
                while ( !it->isEOF() ) {
                    DiskLoc loc = it->getNext();
                    BSONObj obj = coll->docFor( loc );

                    string ns = obj["name"].String();

                    NamespaceString nss( ns );
                    if ( nss.isSystem() ) {
                        if ( nss.isSystemDotIndexes() )
                            continue;
                        if ( nss.coll() == "system.namespaces" )
                            continue;
                    }

                    if ( !nss.isNormal() )
                        continue;

                    CollectionOptions options;
                    if ( obj["options"].isABSONObj() ) {
                        Status status = options.parse( obj["options"].Obj() );
                        if ( !status.isOK() )
                            return status;
                    }
                    namespacesToCopy[ns] = options;
                }
            }
        }

        for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
              i != namespacesToCopy.end();
              ++i ) {
            string ns = i->first;
            CollectionOptions options = i->second;

            Collection* tempCollection = NULL;
            {
                Client::Context tempContext(txn, ns, tempDatabase );
                WriteUnitOfWork wunit(txn);
                tempCollection = tempDatabase->createCollection(txn, ns, options, true, false);
                wunit.commit();
            }

            Client::Context readContext(txn, ns, originalDatabase);
            Collection* originalCollection = originalDatabase->getCollection( txn, ns );
            invariant( originalCollection );

            // data

            // TODO SERVER-14812 add a mode that drops duplicates rather than failing
            MultiIndexBlock indexer(txn, tempCollection );
            {
                vector<BSONObj> indexes;
                IndexCatalog::IndexIterator ii =
                    originalCollection->getIndexCatalog()->getIndexIterator( false );
                while ( ii.more() ) {
                    IndexDescriptor* desc = ii.next();
                    indexes.push_back( desc->infoObj() );
                }

                Client::Context tempContext(txn, ns, tempDatabase);
                Status status = indexer.init( indexes );
                if ( !status.isOK() )
                    return status;
            }

            scoped_ptr<RecordIterator> iterator(
                originalCollection->getIterator( txn,
                                                 DiskLoc(),
                                                 false,
                                                 CollectionScanParams::FORWARD ));
            while ( !iterator->isEOF() ) {
                DiskLoc loc = iterator->getNext();
                invariant( !loc.isNull() );
                BSONObj doc = originalCollection->docFor( loc );

                Client::Context tempContext(txn, ns, tempDatabase);
                WriteUnitOfWork wunit(txn);
                StatusWith<DiskLoc> result = tempCollection->insertDocument(txn, doc, &indexer, false);
                if ( !result.isOK() )
                    return result.getStatus();

                wunit.commit();
                txn->checkForInterrupt(false);
            }

            Status status = indexer.doneInserting();
            if (!status.isOK())
                return status;

            {
                Client::Context tempContext(txn, ns, tempDatabase);
                WriteUnitOfWork wunit(txn);
                indexer.commit();
                wunit.commit();
            }
        }

        txn->recoveryUnit()->syncDataAndTruncateJournal();
        globalStorageEngine->flushAllFiles(true); // need both in case journaling is disabled

        txn->checkForInterrupt(false);
    }

    // at this point if we abort, we don't want to delete new files
    // as they might be the only copies

    if ( repairFileDeleter.get() )
        repairFileDeleter->success();

    dbHolder().close( txn, dbName );

    if ( backupOriginalFiles ) {
        _renameForBackup( dbName, reservedPath );
    }
    else {
        // first make new directory before deleting data
        Path newDir = Path(storageGlobalParams.dbpath) / dbName;
        MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));

        // this deletes old files
        _deleteDataFiles( dbName );

        if ( !boost::filesystem::exists(newDir) ) {
            // we deleted because of directoryperdb
            // re-create
            MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
        }
    }

    _replaceWithRecovered( dbName, reservedPathString.c_str() );

    if ( !backupOriginalFiles )
        MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );

    return Status::OK();
}
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
                                           Collection* collection,
                                           const CompactOptions* compactOptions) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns().toString(), MODE_X));

    DisableDocumentValidation validationDisabler(opCtx);

    auto recordStore = collection->getRecordStore();
    auto indexCatalog = collection->getIndexCatalog();

    if (!recordStore->compactSupported())
        return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
                                        str::stream()
                                            << "cannot compact collection with record store: "
                                            << recordStore->name());

    if (recordStore->compactsInPlace()) {
        CompactStats stats;
        Status status = recordStore->compact(opCtx);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        // Compact all indexes (not including unfinished indexes)
        std::unique_ptr<IndexCatalog::IndexIterator> ii(
            indexCatalog->getIndexIterator(opCtx, false));
        while (ii->more()) {
            IndexCatalogEntry* entry = ii->next();
            IndexDescriptor* descriptor = entry->descriptor();
            IndexAccessMethod* iam = entry->accessMethod();

            LOG(1) << "compacting index: " << descriptor->toString();
            Status status = iam->compact(opCtx);
            if (!status.isOK()) {
                error() << "failed to compact index: " << descriptor->toString();
                return status;
            }
        }

        return StatusWith<CompactStats>(stats);
    }

    if (indexCatalog->numIndexesInProgress(opCtx))
        return StatusWith<CompactStats>(ErrorCodes::BadValue,
                                        "cannot compact when indexes in progress");

    std::vector<BSONObj> indexSpecs;
    {
        std::unique_ptr<IndexCatalog::IndexIterator> ii(
            indexCatalog->getIndexIterator(opCtx, false));
        while (ii->more()) {
            IndexDescriptor* descriptor = ii->next()->descriptor();

            // Compact always creates the new index in the foreground.
            const BSONObj spec =
                descriptor->infoObj().removeField(IndexDescriptor::kBackgroundFieldName);
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus =
                index_key_validate::validateKeyPattern(key, descriptor->version());
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot compact collection due to invalid index " << spec
                                  << ": " << keyStatus.reason() << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    // Give a chance to be interrupted *before* we drop all indexes.
    opCtx->checkForInterrupt();

    {
        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        WriteUnitOfWork wunit(opCtx);
        log() << "compact dropping indexes";
        indexCatalog->dropAllIndexes(opCtx, true);
        wunit.commit();
    }

    CompactStats stats;

    MultiIndexBlockImpl indexer(opCtx, collection);
    indexer.allowInterruption();
    indexer.ignoreUniqueConstraint();  // in compact we should be doing no checking

    Status status = indexer.init(indexSpecs).getStatus();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    status = recordStore->compact(opCtx);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    log() << "starting index commits";
    status = indexer.dumpInsertsFromBulk();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    {
        WriteUnitOfWork wunit(opCtx);
        status = indexer.commit();
        if (!status.isOK()) {
            return StatusWith<CompactStats>(status);
        }
        wunit.commit();
    }

    return StatusWith<CompactStats>(stats);
}
StatusWith<CompactStats> Collection::compact(OperationContext* txn,
                                             const CompactOptions* compactOptions) {
    dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));

    DisableDocumentValidation validationDisabler(txn);

    if (!_recordStore->compactSupported())
        return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
                                        str::stream()
                                            << "cannot compact collection with record store: "
                                            << _recordStore->name());

    if (_recordStore->compactsInPlace()) {
        CompactStats stats;
        Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        // Compact all indexes (not including unfinished indexes)
        IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
        while (ii.more()) {
            IndexDescriptor* descriptor = ii.next();
            IndexAccessMethod* index = _indexCatalog.getIndex(descriptor);

            LOG(1) << "compacting index: " << descriptor->toString();
            Status status = index->compact(txn);
            if (!status.isOK()) {
                error() << "failed to compact index: " << descriptor->toString();
                return status;
            }
        }

        return StatusWith<CompactStats>(stats);
    }

    if (_indexCatalog.numIndexesInProgress(txn))
        return StatusWith<CompactStats>(ErrorCodes::BadValue,
                                        "cannot compact when indexes in progress");

    vector<BSONObj> indexSpecs;
    {
        IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
        while (ii.more()) {
            IndexDescriptor* descriptor = ii.next();

            const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot compact collection due to invalid index " << spec
                                  << ": " << keyStatus.reason() << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    // Give a chance to be interrupted *before* we drop all indexes.
    txn->checkForInterrupt();

    {
        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        WriteUnitOfWork wunit(txn);
        log() << "compact dropping indexes";
        Status status = _indexCatalog.dropAllIndexes(txn, true);
        if (!status.isOK()) {
            return StatusWith<CompactStats>(status);
        }
        wunit.commit();
    }

    CompactStats stats;

    MultiIndexBlock indexer(txn, this);
    indexer.allowInterruption();
    indexer.ignoreUniqueConstraint();  // in compact we should be doing no checking

    Status status = indexer.init(indexSpecs);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    MyCompactAdaptor adaptor(this, &indexer);

    status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    log() << "starting index commits";
    status = indexer.doneInserting();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    {
        WriteUnitOfWork wunit(txn);
        indexer.commit();
        wunit.commit();
    }

    return StatusWith<CompactStats>(stats);
}
Status repairDatabase( string dbName,
                       bool preserveClonedFilesOnFailure,
                       bool backupOriginalFiles ) {
    scoped_ptr<RepairFileDeleter> repairFileDeleter;
    doingRepair dr;
    dbName = nsToDatabase( dbName );

    log() << "repairDatabase " << dbName << endl;

    invariant( cc().database()->name() == dbName );
    invariant( cc().database()->path() == storageGlobalParams.dbpath );

    BackgroundOperation::assertNoBgOpInProgForDb(dbName);

    getDur().syncDataAndTruncateJournal(); // Must be done before and after repair

    intmax_t totalSize = dbSize( dbName );
    intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);

    if ( freeSize > -1 && freeSize < totalSize ) {
        return Status( ErrorCodes::OutOfDiskSpace,
                       str::stream() << "Cannot repair database " << dbName
                                     << " having size: " << totalSize
                                     << " (bytes) because free disk space is: "
                                     << freeSize << " (bytes)" );
    }

    killCurrentOp.checkForInterrupt();

    Path reservedPath =
        uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles )
                            ? "backup" : "_tmp" );
    MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
    string reservedPathString = reservedPath.string();

    if ( !preserveClonedFilesOnFailure )
        repairFileDeleter.reset( new RepairFileDeleter( dbName,
                                                        reservedPathString,
                                                        reservedPath ) );

    {
        Database* originalDatabase = dbHolder().get( dbName, storageGlobalParams.dbpath );
        if ( originalDatabase == NULL )
            return Status( ErrorCodes::NamespaceNotFound, "database does not exist to repair" );

        Database* tempDatabase = NULL;
        {
            bool justCreated = false;
            tempDatabase = dbHolderW().getOrCreate( dbName, reservedPathString, justCreated );
            invariant( justCreated );
        }

        map<string,CollectionOptions> namespacesToCopy;
        {
            string ns = dbName + ".system.namespaces";
            Client::Context ctx( ns );
            Collection* coll = originalDatabase->getCollection( ns );
            if ( coll ) {
                scoped_ptr<CollectionIterator> it( coll->getIterator( DiskLoc(),
                                                                      false,
                                                                      CollectionScanParams::FORWARD ) );
                while ( !it->isEOF() ) {
                    DiskLoc loc = it->getNext();
                    BSONObj obj = coll->docFor( loc );

                    string ns = obj["name"].String();

                    NamespaceString nss( ns );
                    if ( nss.isSystem() ) {
                        if ( nss.isSystemDotIndexes() )
                            continue;
                        if ( nss.coll() == "system.namespaces" )
                            continue;
                    }

                    if ( !nss.isNormal() )
                        continue;

                    CollectionOptions options;
                    if ( obj["options"].isABSONObj() ) {
                        Status status = options.parse( obj["options"].Obj() );
                        if ( !status.isOK() )
                            return status;
                    }
                    namespacesToCopy[ns] = options;
                }
            }
        }

        for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
              i != namespacesToCopy.end();
              ++i ) {
            string ns = i->first;
            CollectionOptions options = i->second;

            Collection* tempCollection = NULL;
            {
                Client::Context tempContext( ns, tempDatabase );
                tempCollection = tempDatabase->createCollection( ns, options, true, false );
            }

            Client::Context readContext( ns, originalDatabase );
            Collection* originalCollection = originalDatabase->getCollection( ns );
            invariant( originalCollection );

            // data
            MultiIndexBlock indexBlock( tempCollection );
            {
                vector<BSONObj> indexes;
                IndexCatalog::IndexIterator ii =
                    originalCollection->getIndexCatalog()->getIndexIterator( false );
                while ( ii.more() ) {
                    IndexDescriptor* desc = ii.next();
                    indexes.push_back( desc->infoObj() );
                }

                Client::Context tempContext( ns, tempDatabase );
                Status status = indexBlock.init( indexes );
                if ( !status.isOK() )
                    return status;
            }

            scoped_ptr<CollectionIterator> iterator(
                originalCollection->getIterator( DiskLoc(),
                                                 false,
                                                 CollectionScanParams::FORWARD ) );
            while ( !iterator->isEOF() ) {
                DiskLoc loc = iterator->getNext();
                invariant( !loc.isNull() );
                BSONObj doc = originalCollection->docFor( loc );

                Client::Context tempContext( ns, tempDatabase );
                StatusWith<DiskLoc> result = tempCollection->insertDocument( doc, indexBlock );
                if ( !result.isOK() )
                    return result.getStatus();

                getDur().commitIfNeeded();
                killCurrentOp.checkForInterrupt(false);
            }

            {
                Client::Context tempContext( ns, tempDatabase );
                Status status = indexBlock.commit();
                if ( !status.isOK() )
                    return status;
            }
        }

        getDur().syncDataAndTruncateJournal();
        MongoFile::flushAll(true); // need both in case journaling is disabled

        killCurrentOp.checkForInterrupt(false);

        Client::Context tempContext( dbName, reservedPathString );
        Database::closeDatabase( dbName, reservedPathString );
    }

    // at this point if we abort, we don't want to delete new files
    // as they might be the only copies

    if ( repairFileDeleter.get() )
        repairFileDeleter->success();

    Client::Context ctx( dbName );
    Database::closeDatabase(dbName, storageGlobalParams.dbpath);

    if ( backupOriginalFiles ) {
        _renameForBackup( dbName, reservedPath );
    }
    else {
        // first make new directory before deleting data
        Path newDir = Path(storageGlobalParams.dbpath) / dbName;
        MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));

        // this deletes old files
        _deleteDataFiles( dbName );

        if ( !boost::filesystem::exists(newDir) ) {
            // we deleted because of directoryperdb
            // re-create
            MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
        }
    }

    _replaceWithRecovered( dbName, reservedPathString.c_str() );

    if ( !backupOriginalFiles )
        MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );

    return Status::OK();
}
void CollectionInfoCache::computeIndexKeys(OperationContext* opCtx) {
    _indexedPaths.clear();

    bool hadTTLIndex = _hasTTLIndex;
    _hasTTLIndex = false;

    IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(opCtx, true);
    while (i.more()) {
        IndexDescriptor* descriptor = i.next();

        if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
            BSONObj key = descriptor->keyPattern();
            const BSONObj& infoObj = descriptor->infoObj();
            if (infoObj.hasField("expireAfterSeconds")) {
                _hasTTLIndex = true;
            }
            BSONObjIterator j(key);
            while (j.more()) {
                BSONElement e = j.next();
                _indexedPaths.addPath(e.fieldName());
            }
        } else {
            fts::FTSSpec ftsSpec(descriptor->infoObj());

            if (ftsSpec.wildcard()) {
                _indexedPaths.allPathsIndexed();
            } else {
                for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
                    _indexedPaths.addPath(ftsSpec.extraBefore(i));
                }

                for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
                     it != ftsSpec.weights().end();
                     ++it) {
                    _indexedPaths.addPath(it->first);
                }

                for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
                    _indexedPaths.addPath(ftsSpec.extraAfter(i));
                }
                // Any update to a path containing "language" as a component could change the
                // language of a subdocument. Add the override field as a path component.
                _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
            }
        }

        // handle partial indexes
        const IndexCatalogEntry* entry = i.catalogEntry(descriptor);
        const MatchExpression* filter = entry->getFilterExpression();
        if (filter) {
            unordered_set<std::string> paths;
            QueryPlannerIXSelect::getFields(filter, "", &paths);
            for (auto it = paths.begin(); it != paths.end(); ++it) {
                _indexedPaths.addPath(*it);
            }
        }
    }

    TTLCollectionCache& ttlCollectionCache = TTLCollectionCache::get(getGlobalServiceContext());

    if (_hasTTLIndex != hadTTLIndex) {
        if (_hasTTLIndex) {
            ttlCollectionCache.registerCollection(_collection->ns());
        } else {
            ttlCollectionCache.unregisterCollection(_collection->ns());
        }
    }

    _keysComputed = true;
}