virtual StatusWith<SpecialFormatInserted> insert(OperationContext* opCtx,
                                                 const BSONObj& key,
                                                 const RecordId& loc,
                                                 bool dupsAllowed) {
    // Insert (key, loc) into the in-memory index data. Returns a DuplicateKey
    // error when 'dupsAllowed' is false and the key is already present.
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    // TODO optimization: save the iterator from the dup-check to speed up insert
    if (!dupsAllowed && keyExists(*_data, key)) {
        return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
    }

    const IndexKeyEntry newEntry(key.getOwned(), loc);
    const bool wasInserted = _data->insert(newEntry).second;
    if (wasInserted) {
        // Track cumulative key size and register a change so the insert is
        // undone if the surrounding unit of work rolls back.
        _currentKeySize += key.objsize();
        opCtx->recoveryUnit()->registerChange(new IndexChange(_data, newEntry, true));
    }
    return StatusWith<SpecialFormatInserted>(SpecialFormatInserted::NoSpecialFormatInserted);
}
StatusWith<SpecialFormatInserted> addKey(const BSONObj& key, const RecordId& loc) {
    // Bulk-append a key. Callers must feed keys in ascending (key, RecordId)
    // order; out-of-order input is an InternalError, and equal keys with
    // distinct RecordIds are a duplicate-key error when dups are disallowed.
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    if (!_data->empty()) {
        // Compare the incoming key with the last inserted entry, ignoring the
        // last entry's RecordId.
        const int keyCmp = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
        const bool outOfOrder =
            keyCmp < 0 || (_dupsAllowed && keyCmp == 0 && loc < _last->loc);
        if (outOfOrder) {
            return Status(ErrorCodes::InternalError,
                          "expected ascending (key, RecordId) order in bulk builder");
        }
        if (!_dupsAllowed && keyCmp == 0 && loc != _last->loc) {
            return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
        }
    }

    // Own the key's buffer before storing it; append at the end (input is sorted).
    BSONObj ownedKey = key.getOwned();
    _last = _data->insert(_data->end(), IndexKeyEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();

    return StatusWith<SpecialFormatInserted>(SpecialFormatInserted::NoSpecialFormatInserted);
}
virtual Status dupKeyCheck(OperationContext* opCtx, const BSONObj& key) {
    // Report a duplicate-key error if 'key' occurs more than once in the index.
    invariant(!hasFieldNames(key));
    return isDup(*_data, key)
        ? buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern)
        : Status::OK();
}
// Drains the external sorter produced by a bulk index build and feeds every key
// into the storage engine's bulk builder, committing one WriteUnitOfWork per key
// and a final one for the builder's commit.
//
// Exactly one of 'dupRecords' (collect RecordIds of duplicates that were NOT
// inserted) and 'dupKeysInserted' (collect keys of duplicates that WERE
// inserted) may be supplied. Returns a DuplicateKey error if a duplicate is hit
// on a unique index with neither dups allowed nor 'dupRecords' provided.
Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
                                             BulkBuilder* bulk,
                                             bool mayInterrupt,
                                             bool dupsAllowed,
                                             set<RecordId>* dupRecords,
                                             std::vector<BSONObj>* dupKeysInserted) {
    // Cannot simultaneously report uninserted duplicates 'dupRecords' and inserted duplicates
    // 'dupKeysInserted'.
    invariant(!(dupRecords && dupKeysInserted));

    Timer timer;

    // 'done()' finalizes the sorter and hands back an iterator over the sorted keys.
    std::unique_ptr<BulkBuilder::Sorter::Iterator> it(bulk->done());

    static const char* message = "Index Build: inserting keys from external sorter into index";
    ProgressMeterHolder pm;
    {
        // Progress meter setup requires the client lock.
        stdx::unique_lock<Client> lk(*opCtx->getClient());
        pm.set(CurOp::get(opCtx)->setProgress_inlock(
            message, bulk->getKeysInserted(), 3 /* secondsBetween */));
    }

    auto builder = std::unique_ptr<SortedDataBuilderInterface>(
        _newInterface->getBulkBuilder(opCtx, dupsAllowed));

    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);

    // 'previousKey' holds the last successfully inserted key; used for the
    // adjacent-duplicate check below (valid because the sorter output is sorted).
    BSONObj previousKey;
    const Ordering ordering = Ordering::make(_descriptor->keyPattern());

    while (it->more()) {
        if (mayInterrupt) {
            opCtx->checkForInterrupt();
        }

        // One unit of work per key; any 'continue'/'return' below leaves it
        // uncommitted, so that key's write is not persisted.
        WriteUnitOfWork wunit(opCtx);

        // Get the next datum and add it to the builder.
        BulkBuilder::Sorter::Data data = it->next();

        // Before attempting to insert, perform a duplicate key check.
        bool isDup = false;
        if (_descriptor->unique()) {
            // Keys arrive sorted, so comparing against the previous key
            // (RecordId ignored) detects all duplicates.
            isDup = data.first.woCompare(previousKey, ordering) == 0;
            if (isDup && !dupsAllowed) {
                if (dupRecords) {
                    // Record the duplicate and skip inserting it.
                    dupRecords->insert(data.second);
                    continue;
                }
                return buildDupKeyErrorStatus(data.first,
                                              _descriptor->parentNS(),
                                              _descriptor->indexName(),
                                              _descriptor->keyPattern());
            }
        }

        Status status = checkIndexKeySize ? checkKeySize(data.first) : Status::OK();
        if (status.isOK()) {
            StatusWith<SpecialFormatInserted> ret = builder->addKey(data.first, data.second);
            status = ret.getStatus();
            // Long TypeBits require flagging the on-disk feature tracker (see
            // the matching note before the final commit below).
            if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
        }

        if (!status.isOK()) {
            // Duplicates are checked before inserting.
            invariant(status.code() != ErrorCodes::DuplicateKey);

            // Overlong key that's OK to skip?
            // TODO SERVER-36385: Remove this when there is no KeyTooLong error.
            if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong()) {
                continue;
            }

            return status;
        }

        // Own the key's memory before the sorter iterator advances past it.
        previousKey = data.first.getOwned();

        if (isDup && dupsAllowed && dupKeysInserted) {
            dupKeysInserted->push_back(data.first.getOwned());
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just fine.
        pm.hit();
        wunit.commit();
    }

    pm.finished();

    log() << "index build: inserted keys from external sorter into index in " << timer.seconds()
          << " seconds";

    // Final unit of work: commit the builder itself.
    WriteUnitOfWork wunit(opCtx);
    SpecialFormatInserted specialFormatInserted = builder->commit(mayInterrupt);
    // It's ok to insert KeyStrings with long TypeBits but we need to mark the feature
    // tracker bit so that downgrade binary which cannot read the long TypeBits fails to
    // start up.
    if (specialFormatInserted == SpecialFormatInserted::LongTypeBitsInserted)
        _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
    wunit.commit();

    return Status::OK();
}