// Applies a prepared UpdateTicket: removes the keys contributed by the old
// version of the document, then inserts the keys for the new version.
// On success, *numUpdated is set to the number of keys inserted.
Status IndexAccessMethod::update(OperationContext* txn,
                                 const UpdateTicket& ticket,
                                 int64_t* numUpdated) {
    if (!ticket._isValid) {
        return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
    }

    // If the document will be associated with more than one index key after this
    // update (old keys + added - removed), persist the multikey flag.
    const bool nowMultikey =
        ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1;
    if (nowMultikey) {
        _btreeState->setMultikey(txn);
    }

    // Drop every key the old document version contributed.
    for (const auto& staleKey : ticket.removed) {
        _newInterface->unindex(txn, *staleKey, ticket.loc, ticket.dupsAllowed);
    }

    // Insert the keys for the new document version.
    for (const auto& freshKey : ticket.added) {
        Status insertStatus =
            _newInterface->insert(txn, *freshKey, ticket.loc, ticket.dupsAllowed);
        if (insertStatus.isOK()) {
            continue;
        }
        // An oversized key may be deliberately ignored; any other failure aborts.
        if (insertStatus.code() != ErrorCodes::KeyTooLong || !ignoreKeyTooLong(txn)) {
            return insertStatus;
        }
    }

    *numUpdated = ticket.added.size();
    return Status::OK();
}
// Applies a prepared UpdateTicket against the btree: unindexes the stale keys,
// then inserts the new ones. The index-specific payload carried by the ticket
// holds the key sets, record location, and duplicate policy.
Status BtreeBasedAccessMethod::update(OperationContext* txn,
                                      const UpdateTicket& ticket,
                                      int64_t* numUpdated) {
    if (!ticket._isValid) {
        return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
    }

    auto* updateData =
        static_cast<BtreeBasedPrivateUpdateData*>(ticket._indexSpecificUpdateData.get());

    // If the document will map to more than one index key after this update
    // (old keys + added - removed), persist the multikey flag.
    const bool nowMultikey =
        updateData->oldKeys.size() + updateData->added.size() - updateData->removed.size() > 1;
    if (nowMultikey) {
        _btreeState->setMultikey(txn);
    }

    // Remove every key the old document version contributed.
    for (const auto& staleKey : updateData->removed) {
        _newInterface->unindex(txn, *staleKey, updateData->loc, updateData->dupsAllowed);
    }

    // Insert the keys for the new document version.
    for (const auto& freshKey : updateData->added) {
        Status insertStatus =
            _newInterface->insert(txn, *freshKey, updateData->loc, updateData->dupsAllowed);
        if (insertStatus.isOK()) {
            continue;
        }
        // An oversized key may be deliberately ignored; any other failure aborts.
        if (insertStatus.code() != ErrorCodes::KeyTooLong || !ignoreKeyTooLong(txn)) {
            return insertStatus;
        }
    }

    *numUpdated = updateData->added.size();
    return Status::OK();
}
// Applies a prepared UpdateTicket: removes the keys contributed by the old
// version of the document, then inserts the keys for the new version.
//
// On success, *numDeleted is the number of keys unindexed and *numInserted the
// number of keys inserted; both are zeroed up front so they are defined on
// every error path.
//
// Fix: removed three `IndexKeyEntry indexEntry = ...` locals that were
// constructed and immediately discarded — dead code with no reads.
Status IndexAccessMethod::update(OperationContext* opCtx,
                                 const UpdateTicket& ticket,
                                 int64_t* numInserted,
                                 int64_t* numDeleted) {
    invariant(numInserted);
    invariant(numDeleted);
    *numInserted = 0;
    *numDeleted = 0;

    if (!ticket._isValid) {
        return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
    }

    // Mark the index multikey if the document will map to more than one key
    // after the update, or if the recorded multikey paths already say so.
    if (ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1 ||
        isMultikeyFromPaths(ticket.newMultikeyPaths)) {
        _btreeState->setMultikey(opCtx, ticket.newMultikeyPaths);
    }

    // Drop every key the old document version contributed.
    for (size_t i = 0; i < ticket.removed.size(); ++i) {
        _newInterface->unindex(opCtx, ticket.removed[i], ticket.loc, ticket.dupsAllowed);
    }

    // Insert the keys for the new document version.
    for (size_t i = 0; i < ticket.added.size(); ++i) {
        Status status =
            _newInterface->insert(opCtx, ticket.added[i], ticket.loc, ticket.dupsAllowed);
        if (!status.isOK()) {
            // An oversized key may be deliberately skipped; anything else aborts.
            if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx)) {
                continue;
            }
            return status;
        }
    }

    *numInserted = ticket.added.size();
    *numDeleted = ticket.removed.size();
    return Status::OK();
}
// Phase 2 of a bulk index build: drains the external sorter's merged, sorted
// key stream into the storage engine's bulk builder, then commits the builder.
// Keys that are too long may be skipped (per ignoreKeyTooLong); duplicate keys
// are either recorded in 'dupsToDrop' for the caller to resolve, or cause an
// error return. Returns a non-OK Status on any unskippable key failure.
Status IndexAccessMethod::commitBulk(OperationContext* txn,
                                     std::unique_ptr<BulkBuilder> bulk,
                                     bool mayInterrupt,
                                     bool dupsAllowed,
                                     set<RecordId>* dupsToDrop) {
    Timer timer;

    // Finalize the sorter and obtain an iterator over all keys in sorted order.
    std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());

    // setMessage_inlock is called under the Client lock (per its _inlock naming);
    // the lock is released again before the long-running build loop below.
    stdx::unique_lock<Client> lk(*txn->getClient());
    ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
                                                   "Index: (2/3) BTree Bottom Up Progress",
                                                   bulk->_keysInserted,
                                                   10));
    lk.unlock();

    std::unique_ptr<SortedDataBuilderInterface> builder;

    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        WriteUnitOfWork wunit(txn);

        // Persist the multikey flag before inserting keys if any document
        // generated multiple keys or the recorded path info indicates multikey.
        if (bulk->_everGeneratedMultipleKeys || isMultikeyFromPaths(bulk->_indexMultikeyPaths)) {
            _btreeState->setMultikey(txn, bulk->_indexMultikeyPaths);
        }

        builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
        wunit.commit();
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");

    while (i->more()) {
        if (mayInterrupt) {
            txn->checkForInterrupt();
        }

        // One unit of work per key; committed at the bottom of the loop on
        // success. Skipped keys (continue) abandon the unit uncommitted.
        WriteUnitOfWork wunit(txn);

        // Improve performance in the btree-building phase by disabling rollback tracking.
        // This avoids copying all the written bytes to a buffer that is only used to roll back.
        // Note that this is safe to do, as this entire index-build-in-progress will be cleaned
        // up by the index system.
        txn->recoveryUnit()->setRollbackWritesDisabled();

        // Get the next datum and add it to the builder.
        BulkBuilder::Sorter::Data d = i->next();
        Status status = builder->addKey(d.first, d.second);

        if (!status.isOK()) {
            // Overlong key that's OK to skip?
            if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
                continue;
            }

            // Check if this is a duplicate that's OK to skip
            if (status.code() == ErrorCodes::DuplicateKey) {
                invariant(!dupsAllowed);  // shouldn't be getting DupKey errors if dupsAllowed.

                if (dupsToDrop) {
                    // Hand the offending record back to the caller for cleanup.
                    dupsToDrop->insert(d.second);
                    continue;
                }
            }

            return status;
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just
        // fine.
        pm.hit();
        wunit.commit();
    }

    pm.finished();

    {
        stdx::lock_guard<Client> lk(*txn->getClient());
        CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
                                           "Index: (3/3) BTree Middle Progress");
    }

    // Log at a more visible level when the bottom-up phase took over 10 seconds.
    LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";

    builder->commit(mayInterrupt);
    return Status::OK();
}
// Phase 2 of a bulk index build: drains the external sorter's sorted key
// stream into the storage engine's bulk builder, then commits the builder.
// When 'assignTimestamp' is true, each commit (and the final builder commit)
// is stamped with the current cluster time from the LogicalClock.
Status IndexAccessMethod::commitBulk(OperationContext* opCtx,
                                     std::unique_ptr<BulkBuilder> bulk,
                                     bool mayInterrupt,
                                     bool dupsAllowed,
                                     set<RecordId>* dupsToDrop,
                                     bool assignTimestamp) {
    // Do not track multikey path info for index builds.
    // The guard re-enables tracking on scope exit; it is dismissed up front if
    // tracking was not active to begin with, so the prior state is restored.
    ScopeGuard restartTracker =
        MakeGuard([opCtx] { MultikeyPathTracker::get(opCtx).startTrackingMultikeyPathInfo(); });
    if (!MultikeyPathTracker::get(opCtx).isTrackingMultikeyPathInfo()) {
        restartTracker.Dismiss();
    }
    MultikeyPathTracker::get(opCtx).stopTrackingMultikeyPathInfo();

    Timer timer;

    // Finalize the sorter and obtain an iterator over all keys in sorted order.
    std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());

    // setMessage_inlock is called under the Client lock (per its _inlock
    // naming); released again before the long-running build loop.
    stdx::unique_lock<Client> lk(*opCtx->getClient());
    ProgressMeterHolder pm(
        CurOp::get(opCtx)->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
                                             "Index: (2/3) BTree Bottom Up Progress",
                                             bulk->_keysInserted,
                                             10));
    lk.unlock();

    std::unique_ptr<SortedDataBuilderInterface> builder;

    writeConflictRetry(opCtx, "setting index multikey flag", "", [&] {
        WriteUnitOfWork wunit(opCtx);

        // Persist the multikey flag before inserting keys if any document
        // generated multiple keys or the recorded path info indicates multikey.
        if (bulk->_everGeneratedMultipleKeys || isMultikeyFromPaths(bulk->_indexMultikeyPaths)) {
            _btreeState->setMultikey(opCtx, bulk->_indexMultikeyPaths);
        }

        builder.reset(_newInterface->getBulkBuilder(opCtx, dupsAllowed));

        // Timestamp the multikey/builder setup with the current cluster time.
        if (assignTimestamp) {
            fassertStatusOK(50705,
                            opCtx->recoveryUnit()->setTimestamp(
                                LogicalClock::get(opCtx)->getClusterTime().asTimestamp()));
        }
        wunit.commit();
    });

    while (i->more()) {
        if (mayInterrupt) {
            opCtx->checkForInterrupt();
        }

        // One unit of work per key; committed at the bottom of the loop on
        // success. Skipped keys (continue) abandon the unit uncommitted.
        WriteUnitOfWork wunit(opCtx);

        // Improve performance in the btree-building phase by disabling rollback tracking.
        // This avoids copying all the written bytes to a buffer that is only used to roll back.
        // Note that this is safe to do, as this entire index-build-in-progress will be cleaned
        // up by the index system.
        opCtx->recoveryUnit()->setRollbackWritesDisabled();

        // Get the next datum and add it to the builder.
        BulkBuilder::Sorter::Data d = i->next();
        Status status = builder->addKey(d.first, d.second);

        if (!status.isOK()) {
            // Overlong key that's OK to skip?
            if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(opCtx)) {
                continue;
            }

            // Check if this is a duplicate that's OK to skip
            if (status.code() == ErrorCodes::DuplicateKey) {
                invariant(!dupsAllowed);  // shouldn't be getting DupKey errors if dupsAllowed.

                if (dupsToDrop) {
                    // Hand the offending record back to the caller for cleanup.
                    dupsToDrop->insert(d.second);
                    continue;
                }
            }

            return status;
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just
        // fine.
        pm.hit();

        // Each per-key commit is timestamped with the current cluster time.
        if (assignTimestamp) {
            fassertStatusOK(50704,
                            opCtx->recoveryUnit()->setTimestamp(
                                LogicalClock::get(opCtx)->getClusterTime().asTimestamp()));
        }
        wunit.commit();
    }

    pm.finished();

    {
        stdx::lock_guard<Client> lk(*opCtx->getClient());
        CurOp::get(opCtx)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
                                             "Index: (3/3) BTree Middle Progress");
    }

    // Log at a more visible level when the bottom-up phase took over 10 seconds.
    LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";

    // Keep a TimestampBlock alive across the final commit so it is also
    // stamped with the cluster time.
    std::unique_ptr<TimestampBlock> tsBlock;
    if (assignTimestamp) {
        tsBlock = stdx::make_unique<TimestampBlock>(
            opCtx, LogicalClock::get(opCtx)->getClusterTime().asTimestamp());
    }
    builder->commit(mayInterrupt);
    return Status::OK();
}
// Drains the bulk builder's sorted key stream into the storage engine's
// bulk builder and commits the result.
//
// Duplicate handling (unique indexes only, detected by comparing each key to
// the previous one in the sorted stream):
//  - dups not allowed + 'dupRecords' set: record the RecordId, skip the key;
//  - dups not allowed + 'dupRecords' null: return a DuplicateKey error;
//  - dups allowed + 'dupKeysInserted' set: insert the key and report it.
// Exactly one of the two out-params may be non-null (see invariant below).
Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
                                             BulkBuilder* bulk,
                                             bool mayInterrupt,
                                             bool dupsAllowed,
                                             set<RecordId>* dupRecords,
                                             std::vector<BSONObj>* dupKeysInserted) {
    // Cannot simultaneously report uninserted duplicates 'dupRecords' and inserted duplicates
    // 'dupKeysInserted'.
    invariant(!(dupRecords && dupKeysInserted));

    Timer timer;

    // Finalize the sorter and obtain an iterator over all keys in sorted order.
    std::unique_ptr<BulkBuilder::Sorter::Iterator> it(bulk->done());

    static const char* message = "Index Build: inserting keys from external sorter into index";
    ProgressMeterHolder pm;
    {
        // setProgress_inlock is called under the Client lock (per its _inlock
        // naming); released before the long-running loop below.
        stdx::unique_lock<Client> lk(*opCtx->getClient());
        pm.set(CurOp::get(opCtx)->setProgress_inlock(
            message, bulk->getKeysInserted(), 3 /* secondsBetween */));
    }

    auto builder = std::unique_ptr<SortedDataBuilderInterface>(
        _newInterface->getBulkBuilder(opCtx, dupsAllowed));

    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);

    // Tracks the previously-inserted key; since the stream is sorted, equal
    // adjacent keys are sufficient to detect duplicates.
    BSONObj previousKey;
    const Ordering ordering = Ordering::make(_descriptor->keyPattern());

    while (it->more()) {
        if (mayInterrupt) {
            opCtx->checkForInterrupt();
        }

        // One unit of work per key; committed at the bottom of the loop on
        // success. Skipped keys (continue) abandon the unit uncommitted.
        WriteUnitOfWork wunit(opCtx);

        // Get the next datum and add it to the builder.
        BulkBuilder::Sorter::Data data = it->next();

        // Before attempting to insert, perform a duplicate key check.
        bool isDup = false;
        if (_descriptor->unique()) {
            isDup = data.first.woCompare(previousKey, ordering) == 0;
            if (isDup && !dupsAllowed) {
                if (dupRecords) {
                    // Defer resolution to the caller: record and skip the key.
                    dupRecords->insert(data.second);
                    continue;
                }
                return buildDupKeyErrorStatus(data.first,
                                              _descriptor->parentNS(),
                                              _descriptor->indexName(),
                                              _descriptor->keyPattern());
            }
        }

        // Enforce the key-size limit first (when enabled); only then insert.
        Status status = checkIndexKeySize ? checkKeySize(data.first) : Status::OK();
        if (status.isOK()) {
            StatusWith<SpecialFormatInserted> ret = builder->addKey(data.first, data.second);
            status = ret.getStatus();
            // Long TypeBits were written: flag the on-disk format immediately so
            // older binaries that cannot read it refuse to start up.
            if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
        }

        if (!status.isOK()) {
            // Duplicates are checked before inserting.
            invariant(status.code() != ErrorCodes::DuplicateKey);

            // Overlong key that's OK to skip?
            // TODO SERVER-36385: Remove this when there is no KeyTooLong error.
            if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong()) {
                continue;
            }

            return status;
        }

        // getOwned() copies the key out of the sorter's buffer so the
        // comparison in the next iteration stays valid.
        previousKey = data.first.getOwned();

        if (isDup && dupsAllowed && dupKeysInserted) {
            dupKeysInserted->push_back(data.first.getOwned());
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just fine.
        pm.hit();
        wunit.commit();
    }

    pm.finished();

    log() << "index build: inserted keys from external sorter into index in " << timer.seconds()
          << " seconds";

    // Final commit of the builder in its own unit of work.
    WriteUnitOfWork wunit(opCtx);
    SpecialFormatInserted specialFormatInserted = builder->commit(mayInterrupt);
    // It's ok to insert KeyStrings with long TypeBits but we need to mark the feature
    // tracker bit so that downgrade binary which cannot read the long TypeBits fails to
    // start up.
    if (specialFormatInserted == SpecialFormatInserted::LongTypeBitsInserted)
        _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
    wunit.commit();
    return Status::OK();
}