void run() { // Create a new collection. Database* db = _ctx.db(); Collection* coll; { WriteUnitOfWork wunit(&_opCtx); ASSERT_OK(db->dropCollection(&_opCtx, _nss)); coll = db->createCollection(&_opCtx, _nss); OpDebug* const nullOpDebug = nullptr; ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << "dup")), nullOpDebug, true)); ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 2 << "a" << "dup")), nullOpDebug, true)); wunit.commit(); } MultiIndexBlock indexer; const BSONObj spec = BSON("name" << "a" << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v" << static_cast<int>(kIndexVersion) << "unique" << true << "background" << background); ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); }); ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus()); auto desc = coll->getIndexCatalog()->findIndexByName(&_opCtx, "a", true /* includeUnfinished */); ASSERT(desc); // Hybrid index builds check duplicates explicitly. ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll)); auto status = indexer.checkConstraints(&_opCtx); ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey); }
void run() { // Create a new collection. Database* db = _ctx.db(); Collection* coll; { WriteUnitOfWork wunit(&_opCtx); ASSERT_OK(db->dropCollection(&_opCtx, _nss)); coll = db->createCollection(&_opCtx, _nss); OpDebug* const nullOpDebug = nullptr; ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << "dup")), nullOpDebug, true)); ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 2 << "a" << "dup")), nullOpDebug, true)); wunit.commit(); } MultiIndexBlock indexer; indexer.ignoreUniqueConstraint(); const BSONObj spec = BSON("name" << "a" << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v" << static_cast<int>(kIndexVersion) << "unique" << true << "background" << background); ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); }); ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus()); ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll)); WriteUnitOfWork wunit(&_opCtx); ASSERT_OK(indexer.commit( &_opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn)); wunit.commit(); }
void run() { WriteUnitOfWork wunit(&_opCtx); BSONObj x = BSON("x" << 1); ASSERT(x["_id"].type() == 0); Collection* collection = _context.db()->getOrCreateCollection(&_opCtx, NamespaceString(ns())); OpDebug* const nullOpDebug = nullptr; ASSERT(!collection->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK()); StatusWith<BSONObj> fixed = fixDocumentForInsert(_opCtx.getServiceContext(), x); ASSERT(fixed.isOK()); x = fixed.getValue(); ASSERT(x["_id"].type() == jstOID); ASSERT(collection->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK()); wunit.commit(); }
void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
                                    const NamespaceString& name,
                                    const BSONObj& view) {
    dassert(opCtx->lockState()->isDbLockedForMode(_db->name(), MODE_X));

    NamespaceString systemViewsNs(_db->getSystemViewsName());
    Collection* systemViews = _db->getCollection(opCtx, systemViewsNs);
    invariant(systemViews);

    const bool requireIndex = false;
    RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);

    Snapshotted<BSONObj> oldView;
    if (!id.isValid() || !systemViews->findDoc(opCtx, id, &oldView)) {
        LOG(2) << "insert view " << view << " into " << _db->getSystemViewsName();
        uassertStatusOK(systemViews->insertDocument(
            opCtx, InsertStatement(view), &CurOp::get(opCtx)->debug()));
    } else {
        CollectionUpdateArgs args;
        args.update = view;
        args.criteria = BSON("_id" << name.ns());
        args.fromMigrate = false;

        const bool assumeIndexesAreAffected = true;
        systemViews->updateDocument(opCtx,
                                    id,
                                    oldView,
                                    view,
                                    assumeIndexesAreAffected,
                                    &CurOp::get(opCtx)->debug(),
                                    &args);
    }
}
void run() {
    // Create a new collection.
    Database* db = _ctx.db();
    Collection* coll;
    {
        WriteUnitOfWork wunit(&_opCtx);
        ASSERT_OK(db->dropCollection(&_opCtx, _nss));
        coll = db->createCollection(&_opCtx, _nss);

        // Drop all indexes including id index.
        coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);

        // Insert some documents.
        int32_t nDocs = 1000;
        OpDebug* const nullOpDebug = nullptr;
        for (int32_t i = 0; i < nDocs; ++i) {
            ASSERT_OK(
                coll->insertDocument(&_opCtx, InsertStatement(BSON("a" << i)), nullOpDebug));
        }
        wunit.commit();
    }

    // Request an interrupt.
    getGlobalServiceContext()->setKillAllOperations();

    BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "ns" << _ns << "name"
                                   << "a_1"
                                   << "v" << static_cast<int>(kIndexVersion));

    // The call is interrupted because mayInterrupt == true.
    ASSERT_TRUE(buildIndexInterrupted(indexInfo));

    // Only want to interrupt the index build.
    getGlobalServiceContext()->unsetKillAllOperations();

    // The new index is not listed in the index catalog because the index build failed.
    ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "a_1"));
}
void insertDocument(Collection* collection, BSONObj obj) {
    WriteUnitOfWork wuow(&_opCtx);

    const bool enforceQuota = false;
    OpDebug* const nullOpDebug = nullptr;
    ASSERT_OK(
        collection->insertDocument(&_opCtx, InsertStatement(obj), nullOpDebug, enforceQuota));
    wuow.commit();
}
void insert(const char* s) {
    WriteUnitOfWork wunit(&_opCtx);
    const BSONObj o = fromjson(s);
    OpDebug* const nullOpDebug = nullptr;

    if (o["_id"].eoo()) {
        // The parsed document has no _id, so generate one before inserting.
        BSONObjBuilder b;
        OID oid;
        oid.init();
        b.appendOID("_id", &oid);
        b.appendElements(o);
        _collection->insertDocument(&_opCtx, InsertStatement(b.obj()), nullOpDebug, false)
            .transitional_ignore();
    } else {
        _collection->insertDocument(&_opCtx, InsertStatement(o), nullOpDebug, false)
            .transitional_ignore();
    }

    wunit.commit();
}
void MockReplCoordServerFixture::insertOplogEntry(const repl::OplogEntry& entry) {
    AutoGetCollection autoColl(opCtx(), NamespaceString::kRsOplogNamespace, MODE_IX);
    auto coll = autoColl.getCollection();
    ASSERT_TRUE(coll != nullptr);

    auto status = coll->insertDocument(opCtx(),
                                       InsertStatement(entry.toBSON()),
                                       &CurOp::get(opCtx())->debug(),
                                       /* fromMigrate */ false);
    ASSERT_OK(status);
}
bool DeferredWriter::insertDocument(BSONObj obj) {
    // We can't insert documents if we haven't been started up.
    invariant(_pool);

    stdx::lock_guard<stdx::mutex> lock(_mutex);

    // Check if we're allowed to insert this object.
    if (_numBytes + obj.objsize() >= _maxNumBytes) {
        // If not, drop it. We always drop new entries rather than old ones; that way the caller
        // knows at the time of the call that the entry was dropped.
        _logDroppedEntry();
        return false;
    }

    // Add the object to the buffer.
    _numBytes += obj.objsize();
    fassert(40588, _pool->schedule([this, obj] { _worker(InsertStatement(obj.getOwned())); }));
    return true;
}
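// --- Illustration (not MongoDB source): a minimal, self-contained sketch of the
// buffering policy DeferredWriter::insertDocument() uses above. Once the byte
// budget is exhausted, new entries are dropped rather than old ones evicted, so
// the caller learns about the drop at insert time. All names below are
// hypothetical.
#include <cstddef>
#include <mutex>
#include <queue>
#include <string>

class BoundedByteBuffer {
public:
    explicit BoundedByteBuffer(std::size_t maxBytes) : _maxBytes(maxBytes) {}

    // Returns false (and counts a drop) when accepting 'entry' would exceed the budget.
    bool tryPush(std::string entry) {
        std::lock_guard<std::mutex> lk(_mutex);
        if (_numBytes + entry.size() >= _maxBytes) {
            ++_numDropped;  // analogous to _logDroppedEntry() above
            return false;
        }
        _numBytes += entry.size();
        _buffer.push(std::move(entry));
        return true;
    }

private:
    std::mutex _mutex;
    std::queue<std::string> _buffer;
    std::size_t _numBytes = 0;
    std::size_t _numDropped = 0;
    const std::size_t _maxBytes;
};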
void run() {
    // Skip the test if the storage engine doesn't support capped collections.
    if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
        return;
    }

    // Recreate the collection as capped, without an _id index.
    Database* db = _ctx.db();
    Collection* coll;
    {
        WriteUnitOfWork wunit(&_opCtx);
        ASSERT_OK(db->dropCollection(&_opCtx, _nss));
        CollectionOptions options;
        options.capped = true;
        options.cappedSize = 10 * 1024;
        coll = db->createCollection(&_opCtx, _nss, options);
        coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);

        // Insert some documents.
        int32_t nDocs = 1000;
        OpDebug* const nullOpDebug = nullptr;
        for (int32_t i = 0; i < nDocs; ++i) {
            ASSERT_OK(coll->insertDocument(
                &_opCtx, InsertStatement(BSON("_id" << i)), nullOpDebug, true));
        }
        wunit.commit();
    }

    // Request an interrupt.
    getGlobalServiceContext()->setKillAllOperations();

    BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "ns" << _ns << "name"
                                   << "_id_"
                                   << "v" << static_cast<int>(kIndexVersion));
    ASSERT_TRUE(buildIndexInterrupted(indexInfo));

    // Only want to interrupt the index build.
    getGlobalServiceContext()->unsetKillAllOperations();

    // The new index is not listed in the index catalog because the index build failed.
    ASSERT(!coll->getIndexCatalog()->findIndexByName(&_opCtx, "_id_"));
}
void insert(const BSONObj& doc) {
    WriteUnitOfWork wunit(&_opCtx);
    OpDebug* const nullOpDebug = nullptr;
    ASSERT_OK(_coll->insertDocument(&_opCtx, InsertStatement(doc), nullOpDebug, false));
    wunit.commit();
}
mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx,
                                             Database* db,
                                             const std::string& shortFrom,
                                             const std::string& shortTo,
                                             long long size,
                                             bool temp) {
    NamespaceString fromNss(db->name(), shortFrom);
    NamespaceString toNss(db->name(), shortTo);

    Collection* fromCollection = db->getCollection(opCtx, fromNss);
    if (!fromCollection) {
        if (db->getViewCatalog()->lookup(opCtx, fromNss.ns())) {
            return Status(ErrorCodes::CommandNotSupportedOnView,
                          str::stream() << "cloneCollectionAsCapped not supported for views: "
                                        << fromNss.ns());
        }
        return Status(ErrorCodes::NamespaceNotFound,
                      str::stream() << "source collection " << fromNss.ns() << " does not exist");
    }

    if (fromNss.isDropPendingNamespace()) {
        return Status(ErrorCodes::NamespaceNotFound,
                      str::stream() << "source collection " << fromNss.ns()
                                    << " is currently in a drop-pending state.");
    }

    if (db->getCollection(opCtx, toNss)) {
        return Status(ErrorCodes::NamespaceExists,
                      str::stream() << "cloneCollectionAsCapped failed - destination collection "
                                    << toNss.ns() << " already exists. source collection: "
                                    << fromNss.ns());
    }

    // Create the new collection.
    {
        auto options = fromCollection->getCatalogEntry()->getCollectionOptions(opCtx);
        // The capped collection will get its own new unique id, as the conversion isn't
        // reversible, so it can't be rolled back.
        options.uuid.reset();
        options.capped = true;
        options.cappedSize = size;
        if (temp)
            options.temp = true;

        BSONObjBuilder cmd;
        cmd.append("create", toNss.coll());
        cmd.appendElements(options.toBSON());
        Status status = createCollection(opCtx, toNss.db().toString(), cmd.done());
        if (!status.isOK())
            return status;
    }

    Collection* toCollection = db->getCollection(opCtx, toNss);
    invariant(toCollection);  // we created above

    // How much data to ignore because it won't fit anyway. datasize and extentSize can't be
    // compared exactly, so add some padding to 'size'.
    long long allocatedSpaceGuess =
        std::max(static_cast<long long>(size * 2),
                 static_cast<long long>(toCollection->getRecordStore()->storageSize(opCtx) * 2));
    long long excessSize = fromCollection->dataSize(opCtx) - allocatedSpaceGuess;

    auto exec = InternalPlanner::collectionScan(opCtx,
                                                fromNss.ns(),
                                                fromCollection,
                                                PlanExecutor::WRITE_CONFLICT_RETRY_ONLY,
                                                InternalPlanner::FORWARD);

    Snapshotted<BSONObj> objToClone;
    RecordId loc;
    PlanExecutor::ExecState state = PlanExecutor::FAILURE;  // suppress uninitialized warnings

    DisableDocumentValidation validationDisabler(opCtx);

    int retries = 0;  // non-zero when retrying our last document.
    while (true) {
        if (!retries) {
            state = exec->getNextSnapshotted(&objToClone, &loc);
        }

        switch (state) {
            case PlanExecutor::IS_EOF:
                return Status::OK();
            case PlanExecutor::ADVANCED: {
                if (excessSize > 0) {
                    // 4x is for padding, power of 2, etc...
                    excessSize -= (4 * objToClone.value().objsize());
                    continue;
                }
                break;
            }
            default:
                // Unreachable as:
                // 1) We require a read lock (at a minimum) on the "from" collection
                //    and won't yield, preventing collection drop and PlanExecutor::DEAD
                // 2) PlanExecutor::FAILURE is only returned on PlanStage::FAILURE. The
                //    CollectionScan PlanStage does not have a FAILURE scenario.
                // 3) All other PlanExecutor states are handled above
                MONGO_UNREACHABLE;
        }

        try {
            // Make sure we are working with the latest version of the document.
            if (objToClone.snapshotId() != opCtx->recoveryUnit()->getSnapshotId() &&
                !fromCollection->findDoc(opCtx, loc, &objToClone)) {
                // Doc was deleted, so don't clone it.
                retries = 0;
                continue;
            }

            WriteUnitOfWork wunit(opCtx);
            OpDebug* const nullOpDebug = nullptr;
            uassertStatusOK(toCollection->insertDocument(
                opCtx, InsertStatement(objToClone.value()), nullOpDebug, true));
            wunit.commit();

            // Go to the next document.
            retries = 0;
        } catch (const WriteConflictException&) {
            CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1);
            retries++;  // logAndBackoff expects this to be 1 on first call.
            WriteConflictException::logAndBackoff(
                retries, "cloneCollectionAsCapped", fromNss.ns());

            // Can't use writeConflictRetry since we need to save/restore exec around call to
            // abandonSnapshot.
            exec->saveState();
            opCtx->recoveryUnit()->abandonSnapshot();
            auto restoreStatus = exec->restoreState();  // Handles any WCEs internally.
            if (!restoreStatus.isOK()) {
                return restoreStatus;
            }
        }
    }

    MONGO_UNREACHABLE;
}
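// --- Illustration (not MongoDB source): a rough sketch of the conflict-retry
// loop in cloneCollectionAsCapped() above. The retry counter is zero while
// advancing normally, is incremented on each conflict so the backoff grows,
// and resets once the document is finally written. Names are hypothetical; the
// real code additionally saves/restores the PlanExecutor around abandoning the
// storage snapshot, which this sketch omits.
#include <algorithm>
#include <chrono>
#include <thread>

struct ConflictException {};

template <typename WriteFn>
void writeWithConflictRetry(WriteFn writeOneDocument) {
    int retries = 0;  // non-zero while retrying the same document
    while (true) {
        try {
            writeOneDocument();
            return;  // success: the caller moves on to the next document
        } catch (const ConflictException&) {
            ++retries;
            // Crude exponential backoff, capped so sleeps stay bounded.
            std::this_thread::sleep_for(
                std::chrono::milliseconds(1LL << std::min(retries, 10)));
        }
    }
}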
void updateSessionEntry(OperationContext* opCtx, const UpdateRequest& updateRequest) {
    // Current code only supports replacement update.
    dassert(UpdateDriver::isDocReplacement(updateRequest.getUpdates()));

    AutoGetCollection autoColl(
        opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IX);

    uassert(40527,
            str::stream() << "Unable to persist transaction state because the session transaction "
                             "collection is missing. This indicates that the "
                          << NamespaceString::kSessionTransactionsTableNamespace.ns()
                          << " collection has been manually deleted.",
            autoColl.getCollection());

    WriteUnitOfWork wuow(opCtx);

    auto collection = autoColl.getCollection();
    auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx);

    uassert(40672,
            str::stream() << "Failed to fetch _id index for "
                          << NamespaceString::kSessionTransactionsTableNamespace.ns(),
            idIndex);

    auto indexAccess = collection->getIndexCatalog()->getIndex(idIndex);

    // Since we are looking up a key inside the _id index, create a key object consisting of only
    // the _id field.
    auto idToFetch = updateRequest.getQuery().firstElement();
    auto toUpdateIdDoc = idToFetch.wrap();
    dassert(idToFetch.fieldNameStringData() == "_id"_sd);
    auto recordId = indexAccess->findSingle(opCtx, toUpdateIdDoc);
    auto startingSnapshotId = opCtx->recoveryUnit()->getSnapshotId();

    if (recordId.isNull()) {
        // Upsert case.
        auto status = collection->insertDocument(
            opCtx, InsertStatement(updateRequest.getUpdates()), nullptr, true, false);

        if (status == ErrorCodes::DuplicateKey) {
            throw WriteConflictException();
        }

        uassertStatusOK(status);
        wuow.commit();
        return;
    }

    auto originalRecordData = collection->getRecordStore()->dataFor(opCtx, recordId);
    auto originalDoc = originalRecordData.toBson();

    invariant(collection->getDefaultCollator() == nullptr);
    boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, nullptr));

    auto matcher = fassertStatusOK(
        40673, MatchExpressionParser::parse(updateRequest.getQuery(), std::move(expCtx)));
    if (!matcher->matchesBSON(originalDoc)) {
        // The document no longer matches what we expect, so throw a WriteConflictException to
        // make the caller re-examine it.
        throw WriteConflictException();
    }

    OplogUpdateEntryArgs args;
    args.nss = NamespaceString::kSessionTransactionsTableNamespace;
    args.uuid = collection->uuid();
    args.update = updateRequest.getUpdates();
    args.criteria = toUpdateIdDoc;
    args.fromMigrate = false;

    collection->updateDocument(opCtx,
                               recordId,
                               Snapshotted<BSONObj>(startingSnapshotId, originalDoc),
                               updateRequest.getUpdates(),
                               true,   // enforceQuota
                               false,  // indexesAffected = false because _id is the only index
                               nullptr,
                               &args);

    wuow.commit();
}
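// --- Illustration (not MongoDB source): the optimistic-concurrency pattern in
// updateSessionEntry() above, reduced to a sketch. After locating the record by
// _id, the query predicate is re-checked against the current document; if it no
// longer matches, a conflict is surfaced so the caller retries the whole
// operation. All names below are hypothetical.
#include <functional>
#include <optional>
#include <stdexcept>
#include <string>

struct WriteConflict : std::runtime_error {
    WriteConflict() : std::runtime_error("write conflict, retry") {}
};

using Doc = std::string;  // stand-in for a stored document

void upsertIfStillMatches(std::optional<Doc>& slot,
                          const std::function<bool(const Doc&)>& stillMatches,
                          const Doc& replacement) {
    if (!slot) {
        slot = replacement;  // upsert path: no existing document, insert one
        return;
    }
    if (!stillMatches(*slot)) {
        // The document changed between lookup and update; force a retry,
        // mirroring the WriteConflictException thrown above.
        throw WriteConflict();
    }
    *slot = replacement;  // replacement-style update, as the dassert above requires
}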