void run() {
    // Create a new collection.
    Database* db = _ctx.db();
    Collection* coll;
    {
        WriteUnitOfWork wunit(&_opCtx);
        ASSERT_OK(db->dropCollection(&_opCtx, _nss));
        coll = db->createCollection(&_opCtx, _nss);

        OpDebug* const nullOpDebug = nullptr;
        ASSERT_OK(coll->insertDocument(&_opCtx,
                                       InsertStatement(BSON("_id" << 1 << "a"
                                                                  << "dup")),
                                       nullOpDebug,
                                       true));
        ASSERT_OK(coll->insertDocument(&_opCtx,
                                       InsertStatement(BSON("_id" << 2 << "a"
                                                                  << "dup")),
                                       nullOpDebug,
                                       true));
        wunit.commit();
    }

    MultiIndexBlock indexer;

    const BSONObj spec = BSON("name"
                              << "a"
                              << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
                              << static_cast<int>(kIndexVersion) << "unique" << true
                              << "background" << background);

    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });

    ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());

    auto desc =
        coll->getIndexCatalog()->findIndexByName(&_opCtx, "a", true /* includeUnfinished */);
    ASSERT(desc);

    // Hybrid index builds check duplicates explicitly.
    ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));

    auto status = indexer.checkConstraints(&_opCtx);
    ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey);
}
void run() {
    // Create a new collection.
    Database* db = _ctx.db();
    Collection* coll;
    {
        WriteUnitOfWork wunit(&_opCtx);
        ASSERT_OK(db->dropCollection(&_opCtx, _nss));
        coll = db->createCollection(&_opCtx, _nss);

        OpDebug* const nullOpDebug = nullptr;
        ASSERT_OK(coll->insertDocument(&_opCtx,
                                       InsertStatement(BSON("_id" << 1 << "a"
                                                                  << "dup")),
                                       nullOpDebug,
                                       true));
        ASSERT_OK(coll->insertDocument(&_opCtx,
                                       InsertStatement(BSON("_id" << 2 << "a"
                                                                  << "dup")),
                                       nullOpDebug,
                                       true));
        wunit.commit();
    }

    MultiIndexBlock indexer;
    indexer.ignoreUniqueConstraint();

    const BSONObj spec = BSON("name"
                              << "a"
                              << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
                              << static_cast<int>(kIndexVersion) << "unique" << true
                              << "background" << background);

    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });

    ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
    ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));

    WriteUnitOfWork wunit(&_opCtx);
    ASSERT_OK(indexer.commit(
        &_opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn));
    wunit.commit();
}
// Builds the index described by 'key'. Returns true if the build was interrupted,
// false if it completed; any non-interruption error is rethrown.
bool buildIndexInterrupted(const BSONObj& key) {
    try {
        MultiIndexBlock indexer;
        ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, collection()); });

        uassertStatusOK(
            indexer.init(&_opCtx, collection(), key, MultiIndexBlock::kNoopOnInitFn));
        uassertStatusOK(indexer.insertAllDocumentsInCollection(&_opCtx, collection()));

        WriteUnitOfWork wunit(&_opCtx);
        ASSERT_OK(indexer.commit(&_opCtx,
                                 collection(),
                                 MultiIndexBlock::kNoopOnCreateEachFn,
                                 MultiIndexBlock::kNoopOnCommitFn));
        wunit.commit();
    } catch (const DBException& e) {
        if (ErrorCodes::isInterruption(e.code()))
            return true;
        throw;
    }
    return false;
}
Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
    MultiIndexBlock indexer;
    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, collection()); });

    Status status =
        indexer.init(&_opCtx, collection(), indexSpec, MultiIndexBlock::kNoopOnInitFn).getStatus();
    if (status == ErrorCodes::IndexAlreadyExists) {
        // An identical index already exists; treat this as success.
        return Status::OK();
    }
    if (!status.isOK()) {
        return status;
    }
    status = indexer.insertAllDocumentsInCollection(&_opCtx, collection());
    if (!status.isOK()) {
        return status;
    }

    WriteUnitOfWork wunit(&_opCtx);
    ASSERT_OK(indexer.commit(&_opCtx,
                             collection(),
                             MultiIndexBlock::kNoopOnCreateEachFn,
                             MultiIndexBlock::kNoopOnCommitFn));
    wunit.commit();
    return Status::OK();
}
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
                                           Collection* collection,
                                           const CompactOptions* compactOptions) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));

    DisableDocumentValidation validationDisabler(opCtx);

    auto recordStore = collection->getRecordStore();
    auto indexCatalog = collection->getIndexCatalog();

    if (!recordStore->compactSupported())
        return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
                                        str::stream()
                                            << "cannot compact collection with record store: "
                                            << recordStore->name());

    if (recordStore->compactsInPlace()) {
        CompactStats stats;
        Status status = recordStore->compact(opCtx);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        // Compact all indexes (not including unfinished indexes)
        status = indexCatalog->compactIndexes(opCtx);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        return StatusWith<CompactStats>(stats);
    }

    if (indexCatalog->numIndexesInProgress(opCtx))
        return StatusWith<CompactStats>(ErrorCodes::BadValue,
                                        "cannot compact when indexes in progress");

    std::vector<BSONObj> indexSpecs;
    {
        std::unique_ptr<IndexCatalog::IndexIterator> ii(
            indexCatalog->getIndexIterator(opCtx, false));
        while (ii->more()) {
            const IndexDescriptor* descriptor = ii->next()->descriptor();

            // Compact always creates the new index in the foreground.
            const BSONObj spec =
                descriptor->infoObj().removeField(IndexDescriptor::kBackgroundFieldName);
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus =
                index_key_validate::validateKeyPattern(key, descriptor->version());
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot compact collection due to invalid index " << spec
                                  << ": " << keyStatus.reason() << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    // Give a chance to be interrupted *before* we drop all indexes.
    opCtx->checkForInterrupt();

    {
        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        WriteUnitOfWork wunit(opCtx);
        log() << "compact dropping indexes";
        indexCatalog->dropAllIndexes(opCtx, true);
        wunit.commit();
    }

    CompactStats stats;

    MultiIndexBlock indexer;
    indexer.ignoreUniqueConstraint();  // in compact we should be doing no checking

    // The 'indexer' could throw, so ensure build cleanup occurs.
    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(opCtx, collection); });

    Status status =
        indexer.init(opCtx, collection, indexSpecs, MultiIndexBlock::kNoopOnInitFn).getStatus();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    status = recordStore->compact(opCtx);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    log() << "starting index commits";
    status = indexer.dumpInsertsFromBulk(opCtx);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    {
        WriteUnitOfWork wunit(opCtx);
        status = indexer.commit(opCtx,
                                collection,
                                MultiIndexBlock::kNoopOnCreateEachFn,
                                MultiIndexBlock::kNoopOnCommitFn);
        if (!status.isOK()) {
            return StatusWith<CompactStats>(status);
        }
        wunit.commit();
    }

    return StatusWith<CompactStats>(stats);
}