// Inserts or replaces the durable definition of view 'name' in this
// database's system.views collection. Callers must hold the database
// X lock (enforced by the dassert below). The new 'view' document is
// written as-is; an existing definition with the same _id is updated
// in place, otherwise a fresh document is inserted.
void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
                                    const NamespaceString& name,
                                    const BSONObj& view) {
    dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_X));

    NamespaceString viewsNs(_db->getSystemViewsName());
    Collection* viewsColl = _db->getCollection(opCtx, viewsNs);
    invariant(viewsColl);

    // Locate any existing definition by _id; system.views may lack an
    // _id index, so allow a collection scan.
    const bool mustUseIndex = false;
    RecordId recId = Helpers::findOne(opCtx, viewsColl, BSON("_id" << name.ns()), mustUseIndex);

    // Note: findDoc is only attempted when the RecordId is valid.
    Snapshotted<BSONObj> existing;
    const bool haveExisting = recId.isValid() && viewsColl->findDoc(opCtx, recId, &existing);

    if (haveExisting) {
        // Replace the stored definition in place.
        CollectionUpdateArgs args;
        args.update = view;
        args.criteria = BSON("_id" << name.ns());
        args.fromMigrate = false;

        const bool indexesAffected = true;
        viewsColl->updateDocument(
            opCtx, recId, existing, view, indexesAffected, &CurOp::get(opCtx)->debug(), &args);
    } else {
        // No prior definition: insert a brand-new document.
        LOG(2) << "insert view " << view << " into " << _db->getSystemViewsName();
        uassertStatusOK(viewsColl->insertDocument(
            opCtx, InsertStatement(view), &CurOp::get(opCtx)->debug()));
    }
}
void RecordStoreValidateAdaptor::traverseRecordStore(RecordStore* recordStore, ValidateCmdLevel level, ValidateResults* results, BSONObjBuilder* output) { long long nrecords = 0; long long dataSizeTotal = 0; long long nInvalid = 0; results->valid = true; std::unique_ptr<SeekableRecordCursor> cursor = recordStore->getCursor(_opCtx, true); int interruptInterval = 4096; RecordId prevRecordId; while (auto record = cursor->next()) { ++nrecords; if (!(nrecords % interruptInterval)) { _opCtx->checkForInterrupt(); } auto dataSize = record->data.size(); dataSizeTotal += dataSize; size_t validatedSize; Status status = validate(record->id, record->data, &validatedSize); // Checks to ensure isInRecordIdOrder() is being used properly. if (prevRecordId.isValid()) { invariant(prevRecordId < record->id); } // While some storage engines may use padding, we still require that they return the // unpadded record data. if (!status.isOK() || validatedSize != static_cast<size_t>(dataSize)) { if (results->valid) { // Only log once. results->errors.push_back("detected one or more invalid documents (see logs)"); } nInvalid++; results->valid = false; log() << "document at location: " << record->id << " is corrupted"; } prevRecordId = record->id; } if (results->valid) { recordStore->updateStatsAfterRepair(_opCtx, nrecords, dataSizeTotal); } output->append("nInvalidDocuments", nInvalid); output->appendNumber("nrecords", nrecords); }
// Deletes the durable definition of view 'name' from this database's
// system.views collection. Callers must hold the database X lock.
// A missing system.views collection or a missing document is a no-op.
void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceString& name) {
    dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_X));

    Collection* viewsColl = _db->getCollection(opCtx, _db->getSystemViewsName());
    if (!viewsColl) {
        return;
    }

    // Find the document by _id; allow a collection scan since an _id
    // index may not exist on system.views.
    const bool mustUseIndex = false;
    RecordId recId = Helpers::findOne(opCtx, viewsColl, BSON("_id" << name.ns()), mustUseIndex);
    if (!recId.isValid()) {
        return;
    }

    LOG(2) << "remove view " << name << " from " << _db->getSystemViewsName();
    viewsColl->deleteDocument(opCtx, kUninitializedStmtId, recId, &CurOp::get(opCtx)->debug());
}
// Removes the (key, loc) entry from the in-memory index data, if present.
// On a successful erase, the aggregate key-size statistic is decremented
// and an IndexChange is registered with the recovery unit so the
// mutation participates in rollback.
virtual void unindex(OperationContext* opCtx,
                     const BSONObj& key,
                     const RecordId& loc,
                     bool dupsAllowed) {
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    IndexKeyEntry removed(key.getOwned(), loc);
    const size_t erasedCount = _data->erase(removed);
    // A set can hold at most one matching entry.
    invariant(erasedCount <= 1);
    if (erasedCount == 1) {
        _currentKeySize -= key.objsize();
        opCtx->recoveryUnit()->registerChange(new IndexChange(_data, removed, false));
    }
}
// Adds the (key, loc) entry to the in-memory index data. When duplicates
// are disallowed and the key already exists, returns a duplicate-key
// error status instead. A successful insert updates the aggregate
// key-size statistic and registers an IndexChange with the recovery
// unit so the mutation participates in rollback.
virtual StatusWith<SpecialFormatInserted> insert(OperationContext* opCtx,
                                                 const BSONObj& key,
                                                 const RecordId& loc,
                                                 bool dupsAllowed) {
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    // TODO optimization: save the iterator from the dup-check to speed up insert
    if (!dupsAllowed && keyExists(*_data, key)) {
        return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
    }

    IndexKeyEntry newEntry(key.getOwned(), loc);
    const bool wasInserted = _data->insert(newEntry).second;
    if (wasInserted) {
        _currentKeySize += key.objsize();
        opCtx->recoveryUnit()->registerChange(new IndexChange(_data, newEntry, true));
    }

    return StatusWith<SpecialFormatInserted>(SpecialFormatInserted::NoSpecialFormatInserted);
}
// Bulk-builder append: callers must supply entries in ascending
// (key, RecordId) order. Out-of-order input yields an InternalError
// status; a duplicate key at a different RecordId yields a
// duplicate-key error when duplicates are disallowed. Accepted entries
// are appended at the end of the data set (hinted insert) and counted
// toward the aggregate key size.
StatusWith<SpecialFormatInserted> addKey(const BSONObj& key, const RecordId& loc) {
    invariant(loc.isValid());
    invariant(!hasFieldNames(key));

    if (!_data->empty()) {
        // Compare against the most recently inserted entry, ignoring
        // its RecordId so only the key ordering is considered here.
        const int ord = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);

        // Equal keys must also arrive in ascending RecordId order when
        // duplicates are permitted.
        const bool outOfOrder = ord < 0 || (_dupsAllowed && ord == 0 && loc < _last->loc);
        if (outOfOrder) {
            return Status(ErrorCodes::InternalError,
                          "expected ascending (key, RecordId) order in bulk builder");
        }
        if (!_dupsAllowed && ord == 0 && loc != _last->loc) {
            return buildDupKeyErrorStatus(key, _collectionNamespace, _indexName, _keyPattern);
        }
    }

    // Own the key bytes before storing, then append with an end() hint
    // since input is already sorted.
    BSONObj ownedKey = key.getOwned();
    _last = _data->insert(_data->end(), IndexKeyEntry(ownedKey, loc));
    *_currentKeySize += key.objsize();

    return StatusWith<SpecialFormatInserted>(SpecialFormatInserted::NoSpecialFormatInserted);
}