Example #1
    void configureSystemIndexes(OperationContext* txn) {
        int authzVersion;
        Status status = getGlobalAuthorizationManager()->getAuthorizationVersion(
                                                                txn, &authzVersion);
        if (!status.isOK()) {
            return;
        }

        if (authzVersion >= AuthorizationManager::schemaVersion26Final) {
            const NamespaceString systemUsers("admin", "system.users");

            // Make sure the old unique index from v2.4 on system.users doesn't exist.
            AutoGetDb autoDb(txn, systemUsers.db(), MODE_X);
            if (!autoDb.getDb()) {
                return;
            }

            Collection* collection = autoDb.getDb()->getCollection(txn,
                                                                   NamespaceString(systemUsers));
            if (!collection) {
                return;
            }

            IndexCatalog* indexCatalog = collection->getIndexCatalog();
            IndexDescriptor* oldIndex = NULL;

            WriteUnitOfWork wunit(txn);
            while ((oldIndex = indexCatalog->findIndexByKeyPattern(txn, v1SystemUsersKeyPattern))) {
                indexCatalog->dropIndex(txn, oldIndex);
            }
            wunit.commit();
        }
    }
Example #2
void ShardingInitializationMongoD::updateShardIdentityConfigString(
    OperationContext* opCtx, const ConnectionString& newConnectionString) {
    BSONObj updateObj(
        ShardIdentityType::createConfigServerUpdateObject(newConnectionString.toString()));

    UpdateRequest updateReq(NamespaceString::kServerConfigurationNamespace);
    updateReq.setQuery(BSON("_id" << ShardIdentityType::IdName));
    updateReq.setUpdateModification(updateObj);

    try {
        AutoGetOrCreateDb autoDb(
            opCtx, NamespaceString::kServerConfigurationNamespace.db(), MODE_X);

        auto result = update(opCtx, autoDb.getDb(), updateReq);
        if (result.numMatched == 0) {
            warning() << "failed to update config string of shard identity document because "
                      << "it does not exist. This shard could have been removed from the cluster";
        } else {
            LOG(2) << "Updated config server connection string in shardIdentity document to"
                   << newConnectionString;
        }
    } catch (const DBException& exception) {
        auto status = exception.toStatus();
        if (!ErrorCodes::isNotMasterError(status.code())) {
            warning() << "Error encountered while trying to update config connection string to "
                      << newConnectionString.toString() << causedBy(redact(status));
        }
    }
}
Example #3
File: auth_index_d.cpp Project: wjin/mongo
    Status verifySystemIndexes(OperationContext* txn) {
        const NamespaceString systemUsers = AuthorizationManager::usersCollectionNamespace;

        // Make sure the old unique index from v2.4 on system.users doesn't exist.
        ScopedTransaction scopedXact(txn, MODE_IX);
        AutoGetDb autoDb(txn, systemUsers.db(), MODE_X);
        if (!autoDb.getDb()) {
            return Status::OK();
        }

        Collection* collection = autoDb.getDb()->getCollection(NamespaceString(systemUsers));
        if (!collection) {
            return Status::OK();
        }

        IndexCatalog* indexCatalog = collection->getIndexCatalog();
        IndexDescriptor* oldIndex = NULL;

        if (indexCatalog &&
            (oldIndex = indexCatalog->findIndexByKeyPattern(txn, v1SystemUsersKeyPattern))) {
            return Status(ErrorCodes::AuthSchemaIncompatible,
                          "Old 2.4 style user index identified. "
                          "The authentication schema needs to be updated by "
                          "running authSchemaUpgrade on a 2.6 server.");
        }

        return Status::OK();
    }
Example #4
    void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
        txn->getClient()->getAuthorizationSession()->grantInternalAuthorization();

        std::vector<std::string> dbNames;

        StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
        storageEngine->listDatabases( &dbNames );

        try {
            std::list<std::string> collNames;
            for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
                 dbName < dbNames.end();
                 ++dbName) {

                ScopedTransaction scopedXact(txn, MODE_IS);
                AutoGetDb autoDb(txn, *dbName, MODE_S);

                Database* db = autoDb.getDb();
                db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
            }
            checkNS(txn, collNames);
        }
        catch (const DBException& e) {
            error() << "Index verification did not complete: " << e.toString();
            fassertFailedNoTrace(18643);
        }
        LOG(1) << "checking complete" << endl;
    }
Example #5
File: dbtests.cpp Project: 3rf/mongo
 Status createIndexFromSpec(OperationContext* txn, const StringData& ns, const BSONObj& spec) {
     AutoGetOrCreateDb autoDb(txn, nsToDatabaseSubstring(ns), MODE_X);
     Collection* coll;
     {
         WriteUnitOfWork wunit(txn);
         coll = autoDb.getDb()->getOrCreateCollection(txn, ns);
         invariant(coll);
         wunit.commit();
     }
     MultiIndexBlock indexer(txn, coll);
     Status status = indexer.init(spec);
     if (status == ErrorCodes::IndexAlreadyExists) {
         return Status::OK();
     }
     if (!status.isOK()) {
         return status;
     }
     status = indexer.insertAllDocumentsInCollection();
     if (!status.isOK()) {
         return status;
     }
     WriteUnitOfWork wunit(txn);
     indexer.commit();
     wunit.commit();
     return Status::OK();
 }
Example #6
Status dropIndexes(OperationContext* txn,
                   const NamespaceString& ns,
                   const BSONObj& idxDescriptor,
                   BSONObjBuilder* result) {
    StringData dbName = ns.db();
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        ScopedTransaction transaction(txn, MODE_IX);
        AutoGetDb autoDb(txn, dbName, MODE_X);

        bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
            !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);

        if (userInitiatedWritesAndNotPrimary) {
            return Status(ErrorCodes::NotMaster,
                          str::stream() << "Not primary while dropping indexes in "
                                        << ns.toString());
        }

        WriteUnitOfWork wunit(txn);
        Status status = wrappedRun(txn, dbName, ns, autoDb.getDb(), idxDescriptor, result);
        if (!status.isOK()) {
            return status;
        }
        getGlobalServiceContext()->getOpObserver()->onDropIndex(
            txn, dbName.toString() + ".$cmd", idxDescriptor);
        wunit.commit();
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
    return Status::OK();
}
Example #7
        bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg,
                 BSONObjBuilder& result, bool fromRepl) {
            const std::string ns = parseNsCollectionRequired(dbname, jsobj);
            MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                ScopedTransaction transaction(txn, MODE_IX);
                AutoGetDb autoDb(txn, dbname, MODE_X);

                if (!fromRepl &&
                    !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
                    return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
                        << "Not primary while dropping indexes in " << ns));
                }

                WriteUnitOfWork wunit(txn);
                bool ok = wrappedRun(txn, dbname, ns, autoDb.getDb(), jsobj, errmsg, result);
                if (!ok) {
                    return false;
                }
                if (!fromRepl) {
                    getGlobalEnvironment()->getOpObserver()->onDropIndex(txn,
                                                                         dbname + ".$cmd",
                                                                         jsobj);
                }
                wunit.commit();
            } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbname);
            return true;
        }
Example #8
        bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& jsobj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool /*fromRepl*/) {

            ScopedTransaction scopedXact(txn, MODE_IS);
            AutoGetDb autoDb(txn, dbname, MODE_S);

            const Database* d = autoDb.getDb();
            const DatabaseCatalogEntry* dbEntry = NULL;

            list<string> names;
            if ( d ) {
                dbEntry = d->getDatabaseCatalogEntry();
                dbEntry->getCollectionNamespaces( &names );
                names.sort();
            }

            scoped_ptr<MatchExpression> matcher;
            if ( jsobj["filter"].isABSONObj() ) {
                StatusWithMatchExpression parsed =
                    MatchExpressionParser::parse( jsobj["filter"].Obj() );
                if ( !parsed.isOK() ) {
                    return appendCommandStatus( result, parsed.getStatus() );
                }
                matcher.reset( parsed.getValue() );
            }

            BSONArrayBuilder arr;

            for ( list<string>::const_iterator i = names.begin(); i != names.end(); ++i ) {
                string ns = *i;

                StringData collection = nsToCollectionSubstring( ns );
                if ( collection == "system.namespaces" ) {
                    continue;
                }

                BSONObjBuilder b;
                b.append( "name", collection );

                CollectionOptions options =
                    dbEntry->getCollectionCatalogEntry( txn, ns )->getCollectionOptions(txn);
                b.append( "options", options.toBSON() );

                BSONObj maybe = b.obj();
                if ( matcher && !matcher->matchesBSON( maybe ) ) {
                    continue;
                }

                arr.append( maybe );
            }

            result.append( "collections", arr.arr() );

            return true;
        }
Example #9
    void run() {
        string ns = "unittests.rollback_set_index_head";
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        NamespaceString nss(ns);
        dropDatabase(&opCtx, nss);
        createCollection(&opCtx, nss);

        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);

        Collection* coll = autoDb.getDb()->getCollection(&opCtx, nss);
        IndexCatalog* catalog = coll->getIndexCatalog();

        string idxName = "a";
        BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName << "v"
                                 << static_cast<int>(kIndexVersion));

        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
            uow.commit();
        }

        IndexDescriptor* indexDesc = catalog->findIndexByName(&opCtx, idxName);
        invariant(indexDesc);
        const IndexCatalogEntry* ice = catalog->getEntry(indexDesc);
        invariant(ice);
        HeadManager* headManager = ice->headManager();

        const RecordId oldHead = headManager->getHead(&opCtx);
        ASSERT_EQ(oldHead, ice->head(&opCtx));

        const RecordId dummyHead(123, 456);
        ASSERT_NE(oldHead, dummyHead);

        // END SETUP / START TEST

        {
            WriteUnitOfWork uow(&opCtx);

            headManager->setHead(&opCtx, dummyHead);

            ASSERT_EQ(ice->head(&opCtx), dummyHead);
            ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);

            if (!rollback) {
                uow.commit();
            }
        }

        if (rollback) {
            ASSERT_EQ(ice->head(&opCtx), oldHead);
            ASSERT_EQ(headManager->getHead(&opCtx), oldHead);
        } else {
            ASSERT_EQ(ice->head(&opCtx), dummyHead);
            ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);
        }
    }
Example #10
mongo::Status mongo::emptyCapped(OperationContext* opCtx, const NamespaceString& collectionName) {
    AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);

    bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
        !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName);

    if (userInitiatedWritesAndNotPrimary) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "Not primary while truncating collection: "
                                    << collectionName.ns());
    }

    Database* db = autoDb.getDb();
    uassert(ErrorCodes::NamespaceNotFound, "no such database", db);

    Collection* collection = db->getCollection(opCtx, collectionName);
    uassert(ErrorCodes::CommandNotSupportedOnView,
            str::stream() << "emptycapped not supported on view: " << collectionName.ns(),
            collection || !db->getViewCatalog()->lookup(opCtx, collectionName.ns()));
    uassert(ErrorCodes::NamespaceNotFound, "no such collection", collection);

    if (collectionName.isSystem() && !collectionName.isSystemDotProfile()) {
        return Status(ErrorCodes::IllegalOperation,
                      str::stream() << "Cannot truncate a system collection: "
                                    << collectionName.ns());
    }

    if (collectionName.isVirtualized()) {
        return Status(ErrorCodes::IllegalOperation,
                      str::stream() << "Cannot truncate a virtual collection: "
                                    << collectionName.ns());
    }

    if ((repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() !=
         repl::ReplicationCoordinator::modeNone) &&
        collectionName.isOplog()) {
        return Status(ErrorCodes::OplogOperationUnsupported,
                      str::stream() << "Cannot truncate a live oplog while replicating: "
                                    << collectionName.ns());
    }

    BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());

    WriteUnitOfWork wuow(opCtx);

    Status status = collection->truncate(opCtx);
    if (!status.isOK()) {
        return status;
    }

    getGlobalServiceContext()->getOpObserver()->onEmptyCapped(
        opCtx, collection->ns(), collection->uuid());

    wuow.commit();

    return Status::OK();
}
Example #11
Status TextMatchExpression::init(OperationContext* opCtx,
                                 const NamespaceString& nss,
                                 TextParams params) {
    _ftsQuery.setQuery(std::move(params.query));
    _ftsQuery.setLanguage(std::move(params.language));
    _ftsQuery.setCaseSensitive(params.caseSensitive);
    _ftsQuery.setDiacriticSensitive(params.diacriticSensitive);

    fts::TextIndexVersion version;
    {
        // Find text index.
        AutoGetDb autoDb(opCtx, nss.db(), MODE_IS);
        Lock::CollectionLock collLock(opCtx->lockState(), nss.ns(), MODE_IS);
        Database* db = autoDb.getDb();
        if (!db) {
            return {ErrorCodes::IndexNotFound,
                    str::stream() << "text index required for $text query (no such collection '"
                                  << nss.ns()
                                  << "')"};
        }
        Collection* collection = db->getCollection(opCtx, nss);
        if (!collection) {
            return {ErrorCodes::IndexNotFound,
                    str::stream() << "text index required for $text query (no such collection '"
                                  << nss.ns()
                                  << "')"};
        }
        std::vector<IndexDescriptor*> idxMatches;
        collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::TEXT, idxMatches);
        if (idxMatches.empty()) {
            return {ErrorCodes::IndexNotFound, "text index required for $text query"};
        }
        if (idxMatches.size() > 1) {
            return {ErrorCodes::IndexNotFound, "more than one text index found for $text query"};
        }
        invariant(idxMatches.size() == 1);
        IndexDescriptor* index = idxMatches[0];
        const FTSAccessMethod* fam =
            static_cast<FTSAccessMethod*>(collection->getIndexCatalog()->getIndex(index));
        invariant(fam);

        // Extract version and default language from text index.
        version = fam->getSpec().getTextIndexVersion();
        if (_ftsQuery.getLanguage().empty()) {
            _ftsQuery.setLanguage(fam->getSpec().defaultLanguage().str());
        }
    }

    Status parseStatus = _ftsQuery.parse(version);
    if (!parseStatus.isOK()) {
        return parseStatus;
    }

    return setPath("_fts");
}
Example #12
TEST_F(AuthOpObserverTest, MultipleAboutToDeleteAndOnDelete) {
    auto uuid = UUID::gen();
    AuthOpObserver opObserver;
    auto opCtx = cc().makeOperationContext();
    NamespaceString nss = {"test", "coll"};
    AutoGetDb autoDb(opCtx.get(), nss.db(), MODE_X);
    WriteUnitOfWork wunit(opCtx.get());
    opObserver.aboutToDelete(opCtx.get(), nss, BSON("_id" << 1));
    opObserver.onDelete(opCtx.get(), nss, uuid, {}, false, {});
    opObserver.aboutToDelete(opCtx.get(), nss, BSON("_id" << 1));
    opObserver.onDelete(opCtx.get(), nss, uuid, {}, false, {});
}
Example #13
void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
                                               Timestamp truncateTimestamp) {
    Timer timer;
    const NamespaceString oplogNss(NamespaceString::kRsOplogNamespace);
    AutoGetDb autoDb(opCtx, oplogNss.db(), MODE_IX);
    Lock::CollectionLock oplogCollectionLoc(opCtx->lockState(), oplogNss.ns(), MODE_X);
    Collection* oplogCollection = autoDb.getDb()->getCollection(opCtx, oplogNss);
    if (!oplogCollection) {
        fassertFailedWithStatusNoTrace(
            34418,
            Status(ErrorCodes::NamespaceNotFound,
                   str::stream() << "Can't find " << NamespaceString::kRsOplogNamespace.ns()));
    }

    // Scan through oplog in reverse, from latest entry to first, to find the truncateTimestamp.
    RecordId oldestIDToDelete;  // Non-null if there is something to delete.
    auto oplogRs = oplogCollection->getRecordStore();
    auto oplogReverseCursor = oplogRs->getCursor(opCtx, /*forward=*/false);
    size_t count = 0;
    while (auto next = oplogReverseCursor->next()) {
        const BSONObj entry = next->data.releaseToBson();
        const RecordId id = next->id;
        count++;

        const auto tsElem = entry["ts"];
        if (count == 1) {
            if (tsElem.eoo())
                LOG(2) << "Oplog tail entry: " << redact(entry);
            else
                LOG(2) << "Oplog tail entry ts field: " << tsElem;
        }

        if (tsElem.timestamp() < truncateTimestamp) {
            // If count == 1, that means that we have nothing to delete because everything in the
            // oplog is < truncateTimestamp.
            if (count != 1) {
                invariant(!oldestIDToDelete.isNull());
                oplogCollection->cappedTruncateAfter(opCtx, oldestIDToDelete, /*inclusive=*/true);
            }
            log() << "Replication recovery oplog truncation finished in: " << timer.millis()
                  << "ms";
            return;
        }

        oldestIDToDelete = id;
    }

    severe() << "Reached end of oplog looking for oplog entry before " << truncateTimestamp.toBSON()
             << " but couldn't find any after looking through " << count << " entries.";
    fassertFailedNoTrace(40296);
}
Example #14
mongo::Status mongo::convertToCapped(OperationContext* opCtx,
                                     const NamespaceString& collectionName,
                                     long long size) {
    StringData dbname = collectionName.db();
    StringData shortSource = collectionName.coll();

    AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);

    bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
        !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName);

    if (userInitiatedWritesAndNotPrimary) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "Not primary while converting " << collectionName.ns()
                                    << " to a capped collection");
    }

    Database* const db = autoDb.getDb();
    if (!db) {
        return Status(ErrorCodes::NamespaceNotFound,
                      str::stream() << "database " << dbname << " not found");
    }

    BackgroundOperation::assertNoBgOpInProgForDb(dbname);

    // Generate a temporary collection name that will not collide with any existing collections.
    auto tmpNameResult =
        db->makeUniqueCollectionNamespace(opCtx, "tmp%%%%%.convertToCapped." + shortSource);
    if (!tmpNameResult.isOK()) {
        return tmpNameResult.getStatus().withContext(
            str::stream() << "Cannot generate temporary collection namespace to convert "
                          << collectionName.ns()
                          << " to a capped collection");
    }
    const auto& longTmpName = tmpNameResult.getValue();
    const auto shortTmpName = longTmpName.coll().toString();

    {
        Status status =
            cloneCollectionAsCapped(opCtx, db, shortSource.toString(), shortTmpName, size, true);

        if (!status.isOK())
            return status;
    }

    RenameCollectionOptions options;
    options.dropTarget = true;
    options.stayTemp = false;
    return renameCollection(opCtx, longTmpName, collectionName, options);
}
Example #15
File: case_db.cpp Project: 0xNF/sleuthkit
/**
* Add an image to the database.  This method does not allow you
* to customize any of the settings for ingest (such as hash calculation,
* and block map population).  Use TskCaseDb::initAddImage() to set
* these values.
*
* @param numImg Number of images to add
* @param imagePaths Paths to the image splits to open.
* @param imgType TYpe of image format
* @param sSize Sector size of image
* @returns 1 on error and 0 on success
*/
uint8_t
    TskCaseDb::addImage(int numImg, const TSK_TCHAR * const imagePaths[],
    TSK_IMG_TYPE_ENUM imgType, unsigned int sSize)
{
    TskAutoDb autoDb(m_db, m_NSRLDb, m_knownBadDb);
    
    if (autoDb.startAddImage(numImg, imagePaths, imgType, sSize)) {
        autoDb.revertAddImage();
        return 1;
    }
    autoDb.commitAddImage();
    
    return 0;
}
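
As a rough usage sketch for the API described in the doc comment above (the convenience path that skips TskCaseDb::initAddImage()), the snippet below assumes the Sleuth Kit's TskCaseDb::newDb() factory, the _TSK_T() literal macro, and the TSK_IMG_TYPE_DETECT constant from the public tsk headers; the header name and file paths are illustrative only.

#include "tsk/libtsk.h"  // assumption: umbrella header that declares TskCaseDb

int main() {
    // Create a new case database at an illustrative path.
    TskCaseDb* caseDb = TskCaseDb::newDb(_TSK_T("/tmp/example_case.db"));
    if (caseDb == NULL) {
        return 1;
    }

    // One image file; TSK_IMG_TYPE_DETECT auto-detects the format, 0 uses the default sector size.
    const TSK_TCHAR* images[] = {_TSK_T("/tmp/disk.img")};
    uint8_t err = caseDb->addImage(1, images, TSK_IMG_TYPE_DETECT, 0);

    delete caseDb;
    return err;  // 0 on success, 1 on error, per addImage() above
}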
Example #16
    void run() {
        string ns = "unittests.rollback_drop_index";
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        NamespaceString nss(ns);
        dropDatabase(&opCtx, nss);
        createCollection(&opCtx, nss);

        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);

        Collection* coll = autoDb.getDb()->getCollection(&opCtx, nss);
        IndexCatalog* catalog = coll->getIndexCatalog();

        string idxName = "a";
        BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName << "v"
                                 << static_cast<int>(kIndexVersion));

        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
            insertRecord(&opCtx, nss, BSON("a" << 1));
            insertRecord(&opCtx, nss, BSON("a" << 2));
            insertRecord(&opCtx, nss, BSON("a" << 3));
            uow.commit();
        }
        ASSERT(indexReady(&opCtx, nss, idxName));
        ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));

        // END SETUP / START TEST

        {
            WriteUnitOfWork uow(&opCtx);

            dropIndex(&opCtx, nss, idxName);
            ASSERT(!indexExists(&opCtx, nss, idxName));

            if (!rollback) {
                uow.commit();
            }
        }
        if (rollback) {
            ASSERT(indexExists(&opCtx, nss, idxName));
            ASSERT(indexReady(&opCtx, nss, idxName));
            ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
        } else {
            ASSERT(!indexExists(&opCtx, nss, idxName));
        }
    }
Example #17
File: db_raii.cpp Project: mpobrien/mongo
AutoStatsTracker::AutoStatsTracker(OperationContext* opCtx,
                                   const NamespaceString& nss,
                                   Top::LockType lockType,
                                   boost::optional<int> dbProfilingLevel)
    : _opCtx(opCtx), _lockType(lockType) {
    if (!dbProfilingLevel) {
        // No profiling level was determined, attempt to read the profiling level from the Database
        // object.
        AutoGetDb autoDb(_opCtx, nss.db(), MODE_IS);
        if (autoDb.getDb()) {
            dbProfilingLevel = autoDb.getDb()->getProfilingLevel();
        }
    }
    stdx::lock_guard<Client> clientLock(*_opCtx->getClient());
    CurOp::get(_opCtx)->enter_inlock(nss.ns().c_str(), dbProfilingLevel);
}
Example #18
Status dropIndexes(OperationContext* opCtx,
                   const NamespaceString& nss,
                   const BSONObj& cmdObj,
                   BSONObjBuilder* result) {
    return writeConflictRetry(opCtx, "dropIndexes", nss.db(), [opCtx, &nss, &cmdObj, result] {
        AutoGetDb autoDb(opCtx, nss.db(), MODE_X);

        bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
            !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss);

        if (userInitiatedWritesAndNotPrimary) {
            return Status(ErrorCodes::NotMaster,
                          str::stream() << "Not primary while dropping indexes in " << nss);
        }

        if (!serverGlobalParams.quiet.load()) {
            LOG(0) << "CMD: dropIndexes " << nss << ": " << cmdObj[kIndexFieldName].toString(false);
        }

        // If db/collection does not exist, short circuit and return.
        Database* db = autoDb.getDb();
        Collection* collection = db ? db->getCollection(opCtx, nss) : nullptr;
        if (!db || !collection) {
            if (db && db->getViewCatalog()->lookup(opCtx, nss.ns())) {
                return Status(ErrorCodes::CommandNotSupportedOnView,
                              str::stream() << "Cannot drop indexes on view " << nss);
            }

            return Status(ErrorCodes::NamespaceNotFound, "ns not found");
        }

        WriteUnitOfWork wunit(opCtx);
        OldClientContext ctx(opCtx, nss.ns());
        BackgroundOperation::assertNoBgOpInProgForNs(nss);

        Status status = wrappedRun(opCtx, collection, cmdObj, result);
        if (!status.isOK()) {
            return status;
        }

        wunit.commit();
        return Status::OK();
    });
}
Example #19
File: sync_tail.cpp Project: Jaryli/mongo
bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
    const NamespaceString nss(o.getStringField("ns"));
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        // Take an X lock on the database in order to preclude other modifications.
        // Also, the database might not exist yet, so create it.
        AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
        Database* const db = autoDb.getDb();

        // we don't have the object yet, which is possible on initial sync.  get it.
        log() << "adding missing object" << endl;  // rare enough we can log

        BSONObj missingObj = getMissingDoc(txn, db, o);

        if (missingObj.isEmpty()) {
            log() << "missing object not found on source."
                     " presumably deleted later in oplog";
            log() << "o2: " << o.getObjectField("o2").toString();
            log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName();

            return false;
        } else {
            WriteUnitOfWork wunit(txn);

            Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
            invariant(coll);

            Status status = coll->insertDocument(txn, missingObj, true);
            uassert(15917,
                    str::stream() << "failed to insert missing doc: " << status.toString(),
                    status.isOK());

            LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;

            wunit.commit();
            return true;
        }
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns());

    // fixes compile errors on GCC - see SERVER-18219 for details
    MONGO_UNREACHABLE;
}
Example #20
    Status emptyCapped(OperationContext* txn,
                       const NamespaceString& collectionName) {
        ScopedTransaction scopedXact(txn, MODE_IX);
        AutoGetDb autoDb(txn, collectionName.db(), MODE_X);

        bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
            !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(
                    collectionName.db());

        if (userInitiatedWritesAndNotPrimary) {
            return Status(ErrorCodes::NotMaster,
                          str::stream() << "Not primary while truncating collection "
                                        << collectionName.ns());
        }

        Database* db = autoDb.getDb();
        massert(13429, "no such database", db);

        Collection* collection = db->getCollection(collectionName);
        massert(28584, "no such collection", collection);

        std::vector<BSONObj> indexes = stopIndexBuildsEmptyCapped(txn, db, collectionName);

        WriteUnitOfWork wuow(txn);

        Status status = collection->truncate(txn);
        if (!status.isOK()) {
            return status;
        }

        IndexBuilder::restoreIndexes(txn, indexes);

        getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());

        wuow.commit();

        return Status::OK();
    }
Example #21
    bool Sync::shouldRetry(OperationContext* txn, const BSONObj& o) {
        const NamespaceString nss(o.getStringField("ns"));

        // Take an X lock on the database in order to preclude other modifications. Also, the
        // database might not exist yet, so create it.
        AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
        Database* const db = autoDb.getDb();

        // we don't have the object yet, which is possible on initial sync.  get it.
        log() << "adding missing object" << endl; // rare enough we can log

        BSONObj missingObj = getMissingDoc(txn, db, o);

        if( missingObj.isEmpty() ) {
            log() << "missing object not found on source. presumably deleted later in oplog" << endl;
            log() << "o2: " << o.getObjectField("o2").toString() << endl;
            log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName() << endl;

            return false;
        }
        else {
            WriteUnitOfWork wunit(txn);

            Collection* const collection = db->getOrCreateCollection(txn, nss.toString());
            invariant(collection);

            StatusWith<RecordId> result = collection->insertDocument(txn, missingObj, true);
            uassert(15917,
                    str::stream() << "failed to insert missing doc: "
                                  << result.getStatus().toString(),
                    result.isOK() );

            LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;

            wunit.commit();
            return true;
        }
    }
Example #22
    bool DBHashCmd::run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
        Timer timer;

        set<string> desiredCollections;
        if ( cmdObj["collections"].type() == Array ) {
            BSONObjIterator i( cmdObj["collections"].Obj() );
            while ( i.more() ) {
                BSONElement e = i.next();
                if ( e.type() != String ) {
                    errmsg = "collections entries have to be strings";
                    return false;
                }
                desiredCollections.insert( e.String() );
            }
        }

        list<string> colls;
        const string ns = parseNs(dbname, cmdObj);

        // We lock the entire database in S-mode in order to ensure that the contents will not
        // change for the snapshot.
        AutoGetDb autoDb(txn, ns, MODE_S);
        Database* db = autoDb.getDb();
        if (db) {
            db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
            colls.sort();
        }

        result.appendNumber( "numCollections" , (long long)colls.size() );
        result.append( "host" , prettyHostName() );

        md5_state_t globalState;
        md5_init(&globalState);

        vector<string> cached;

        BSONObjBuilder bb( result.subobjStart( "collections" ) );
        for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ) {
            string fullCollectionName = *i;
            if ( fullCollectionName.size() -1 <= dbname.size() ) {
                errmsg  = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
                return false;
            }
            string shortCollectionName = fullCollectionName.substr( dbname.size() + 1 );

            if ( shortCollectionName.find( "system." ) == 0 )
                continue;

            if ( desiredCollections.size() > 0 &&
                 desiredCollections.count( shortCollectionName ) == 0 )
                continue;

            bool fromCache = false;
            string hash = hashCollection( txn, db, fullCollectionName, &fromCache );

            bb.append( shortCollectionName, hash );

            md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
            if ( fromCache )
                cached.push_back( fullCollectionName );
        }
        bb.done();

        md5digest d;
        md5_finish(&globalState, d);
        string hash = digestToString( d );

        result.append( "md5" , hash );
        result.appendNumber( "timeMillis", timer.millis() );

        result.append( "fromCache", cached );

        return true;
    }
Example #23
Status verifySystemIndexes(OperationContext* opCtx) {
    const NamespaceString& systemUsers = AuthorizationManager::usersCollectionNamespace;
    const NamespaceString& systemRoles = AuthorizationManager::rolesCollectionNamespace;

    AutoGetDb autoDb(opCtx, systemUsers.db(), MODE_X);
    if (!autoDb.getDb()) {
        return Status::OK();
    }

    Collection* collection = autoDb.getDb()->getCollection(opCtx, systemUsers);
    if (collection) {
        IndexCatalog* indexCatalog = collection->getIndexCatalog();
        invariant(indexCatalog);

        // Make sure the old unique index from v2.4 on system.users doesn't exist.
        std::vector<IndexDescriptor*> indexes;
        indexCatalog->findIndexesByKeyPattern(opCtx, v1SystemUsersKeyPattern, false, &indexes);

        if (!indexes.empty()) {
            fassert(ErrorCodes::AmbiguousIndexKeyPattern, indexes.size() == 1);
            return Status(ErrorCodes::AuthSchemaIncompatible,
                          "Old 2.4 style user index identified. "
                          "The authentication schema needs to be updated by "
                          "running authSchemaUpgrade on a 2.6 server.");
        }

        // Ensure that system indexes exist for the user collection
        indexCatalog->findIndexesByKeyPattern(opCtx, v3SystemUsersKeyPattern, false, &indexes);
        if (indexes.empty()) {
            try {
                generateSystemIndexForExistingCollection(
                    opCtx, collection, systemUsers, v3SystemUsersIndexSpec);
            } catch (...) {
                return exceptionToStatus();
            }
        }
    }

    // Ensure that system indexes exist for the roles collection, if it exists.
    collection = autoDb.getDb()->getCollection(opCtx, systemRoles);
    if (collection) {
        IndexCatalog* indexCatalog = collection->getIndexCatalog();
        invariant(indexCatalog);

        std::vector<IndexDescriptor*> indexes;
        indexCatalog->findIndexesByKeyPattern(opCtx, v3SystemRolesKeyPattern, false, &indexes);
        if (indexes.empty()) {
            try {
                generateSystemIndexForExistingCollection(
                    opCtx, collection, systemRoles, v3SystemRolesIndexSpec);
            } catch (...) {
                return exceptionToStatus();
            }
        }
    }

    // Ensure that system indexes exist for the sessions collection, if it exists.
    collection = autoDb.getDb()->getCollection(opCtx, sessionCollectionNamespace);
    if (collection) {
        IndexCatalog* indexCatalog = collection->getIndexCatalog();
        invariant(indexCatalog);

        std::vector<IndexDescriptor*> indexes;
        indexCatalog->findIndexesByKeyPattern(opCtx, v1SystemSessionsKeyPattern, false, &indexes);
        if (indexes.empty()) {
            try {
                generateSystemIndexForExistingCollection(
                    opCtx, collection, sessionCollectionNamespace, v1SystemSessionsIndexSpec);
            } catch (...) {
                return exceptionToStatus();
            }
        }
    }

    return Status::OK();
}
Example #24
Status dropCollection(OperationContext* opCtx,
                      const NamespaceString& collectionName,
                      BSONObjBuilder& result,
                      const repl::OpTime& dropOpTime,
                      DropCollectionSystemCollectionMode systemCollectionMode) {
    if (!serverGlobalParams.quiet.load()) {
        log() << "CMD: drop " << collectionName;
    }

    return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] {
        AutoGetDb autoDb(opCtx, collectionName.db(), MODE_X);
        Database* const db = autoDb.getDb();
        Collection* coll = db ? db->getCollection(opCtx, collectionName) : nullptr;
        auto view =
            db && !coll ? db->getViewCatalog()->lookup(opCtx, collectionName.ns()) : nullptr;

        if (MONGO_FAIL_POINT(hangDuringDropCollection)) {
            log() << "hangDuringDropCollection fail point enabled. Blocking until fail point is "
                     "disabled.";
            MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangDuringDropCollection);
        }

        if (!db || (!coll && !view)) {
            return Status(ErrorCodes::NamespaceNotFound, "ns not found");
        }

        const bool shardVersionCheck = true;
        OldClientContext context(opCtx, collectionName.ns(), shardVersionCheck);

        bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
            !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName);

        if (userInitiatedWritesAndNotPrimary) {
            return Status(ErrorCodes::NotMaster,
                          str::stream() << "Not primary while dropping collection "
                                        << collectionName);
        }

        WriteUnitOfWork wunit(opCtx);
        if (!result.hasField("ns")) {
            result.append("ns", collectionName.ns());
        }

        if (coll) {
            invariant(!view);
            int numIndexes = coll->getIndexCatalog()->numIndexesTotal(opCtx);

            BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());

            Status s = systemCollectionMode ==
                    DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops
                ? db->dropCollection(opCtx, collectionName.ns(), dropOpTime)
                : db->dropCollectionEvenIfSystem(opCtx, collectionName, dropOpTime);

            if (!s.isOK()) {
                return s;
            }

            result.append("nIndexesWas", numIndexes);
        } else {
            invariant(view);
            Status status = db->dropView(opCtx, collectionName.ns());
            if (!status.isOK()) {
                return status;
            }
        }
        wunit.commit();

        return Status::OK();
    });
}
Example #25
    Status convertToCapped(OperationContext* txn,
                           const NamespaceString& collectionName,
                           double size) {

        StringData dbname = collectionName.db();
        StringData shortSource = collectionName.coll();

        ScopedTransaction transaction(txn, MODE_IX);
        AutoGetDb autoDb(txn, collectionName.db(), MODE_X);

        bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
            !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname);

        if (userInitiatedWritesAndNotPrimary) {
            return Status(ErrorCodes::NotMaster,
                          str::stream() << "Not primary while converting "
                                        << collectionName.ns() << " to a capped collection");
        }

        Database* const db = autoDb.getDb();
        if (!db) {
            return Status(ErrorCodes::DatabaseNotFound,
                          str::stream() << "database " << dbname << " not found");
        }

        stopIndexBuildsConvertToCapped(txn, db, collectionName);
        BackgroundOperation::assertNoBgOpInProgForDb(dbname);

        std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
        std::string longTmpName = str::stream() << dbname << "." << shortTmpName;

        WriteUnitOfWork wunit(txn);
        if (db->getCollection(longTmpName)) {
            Status status = db->dropCollection(txn, longTmpName);
            if (!status.isOK())
                return status;
        }


        const bool shouldReplicateWrites = txn->writesAreReplicated();
        txn->setReplicatedWrites(false);
        ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
        Status status = cloneCollectionAsCapped(txn,
                                                db,
                                                shortSource.toString(),
                                                shortTmpName,
                                                size,
                                                true);

        if (!status.isOK()) {
            return status;
        }

        verify(db->getCollection(longTmpName));

        status = db->dropCollection(txn, collectionName.ns());
        txn->setReplicatedWrites(shouldReplicateWrites);
        if (!status.isOK())
            return status;

        status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
        if (!status.isOK())
            return status;

        getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
                txn,
                NamespaceString(collectionName),
                size);

        wunit.commit();
        return Status::OK();
    }
Example #26
/**
 * If uuid is specified, add it to the collection specified by nss. This will error if the
 * collection already has a UUID.
 */
Status _collModInternal(OperationContext* opCtx,
                        const NamespaceString& nss,
                        const BSONObj& cmdObj,
                        BSONObjBuilder* result,
                        bool upgradeUniqueIndexes) {
    StringData dbName = nss.db();
    AutoGetDb autoDb(opCtx, dbName, MODE_X);
    Database* const db = autoDb.getDb();
    Collection* coll = db ? db->getCollection(opCtx, nss) : nullptr;

    // May also modify a view instead of a collection.
    boost::optional<ViewDefinition> view;
    if (db && !coll) {
        const auto sharedView = db->getViewCatalog()->lookup(opCtx, nss.ns());
        if (sharedView) {
            // We copy the ViewDefinition as it is modified below to represent the requested state.
            view = {*sharedView};
        }
    }

    // This can kill all cursors so don't allow running it while a background operation is in
    // progress.
    BackgroundOperation::assertNoBgOpInProgForNs(nss);

    // If db/collection/view does not exist, short circuit and return.
    if (!db || (!coll && !view)) {
        return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
    }

    // This is necessary to set up CurOp and update the Top stats.
    OldClientContext ctx(opCtx, nss.ns());

    bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
        !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss);

    if (userInitiatedWritesAndNotPrimary) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "Not primary while setting collection options on "
                                    << nss.ns());
    }

    BSONObjBuilder oplogEntryBuilder;
    auto statusW = parseCollModRequest(opCtx, nss, coll, cmdObj, &oplogEntryBuilder);
    if (!statusW.isOK()) {
        return statusW.getStatus();
    }

    CollModRequest cmr = statusW.getValue();

    WriteUnitOfWork wunit(opCtx);

    // Handle collMod on a view and return early. The View Catalog handles the creation of oplog
    // entries for modifications on a view.
    if (view) {
        if (!cmr.viewPipeLine.eoo())
            view->setPipeline(cmr.viewPipeLine);

        if (!cmr.viewOn.empty())
            view->setViewOn(NamespaceString(dbName, cmr.viewOn));

        ViewCatalog* catalog = db->getViewCatalog();

        BSONArrayBuilder pipeline;
        for (auto& item : view->pipeline()) {
            pipeline.append(item);
        }
        auto errorStatus =
            catalog->modifyView(opCtx, nss, view->viewOn(), BSONArray(pipeline.obj()));
        if (!errorStatus.isOK()) {
            return errorStatus;
        }

        wunit.commit();
        return Status::OK();
    }

    // In order to facilitate the replication rollback process, which makes a best effort attempt to
    // "undo" a set of oplog operations, we store a snapshot of the old collection options to
    // provide to the OpObserver. TTL index updates aren't a part of collection options so we
    // save the relevant TTL index data in a separate object.

    CollectionOptions oldCollOptions = coll->getCatalogEntry()->getCollectionOptions(opCtx);
    boost::optional<TTLCollModInfo> ttlInfo;

    // Handle collMod operation type appropriately.

    // TTLIndex
    if (!cmr.indexExpireAfterSeconds.eoo()) {
        BSONElement& newExpireSecs = cmr.indexExpireAfterSeconds;
        BSONElement oldExpireSecs = cmr.idx->infoObj().getField("expireAfterSeconds");

        if (SimpleBSONElementComparator::kInstance.evaluate(oldExpireSecs != newExpireSecs)) {
            result->appendAs(oldExpireSecs, "expireAfterSeconds_old");

            // Change the value of "expireAfterSeconds" on disk.
            coll->getCatalogEntry()->updateTTLSetting(
                opCtx, cmr.idx->indexName(), newExpireSecs.safeNumberLong());

            // Notify the index catalog that the definition of this index changed.
            cmr.idx = coll->getIndexCatalog()->refreshEntry(opCtx, cmr.idx);
            result->appendAs(newExpireSecs, "expireAfterSeconds_new");
            opCtx->recoveryUnit()->onRollback([ opCtx, idx = cmr.idx, coll ]() {
                coll->getIndexCatalog()->refreshEntry(opCtx, idx);
            });
        }

        // Save previous TTL index expiration.
        ttlInfo = TTLCollModInfo{Seconds(newExpireSecs.safeNumberLong()),
                                 Seconds(oldExpireSecs.safeNumberLong()),
                                 cmr.idx->indexName()};
    }

    // The Validator, ValidationAction and ValidationLevel are already parsed and must be OK.
    if (!cmr.collValidator.eoo())
        invariant(coll->setValidator(opCtx, cmr.collValidator.Obj()));
    if (!cmr.collValidationAction.empty())
        invariant(coll->setValidationAction(opCtx, cmr.collValidationAction));
    if (!cmr.collValidationLevel.empty())
        invariant(coll->setValidationLevel(opCtx, cmr.collValidationLevel));

    // UsePowerof2Sizes
    if (!cmr.usePowerOf2Sizes.eoo())
        setCollectionOptionFlag(opCtx, coll, cmr.usePowerOf2Sizes, result);

    // NoPadding
    if (!cmr.noPadding.eoo())
        setCollectionOptionFlag(opCtx, coll, cmr.noPadding, result);

    // Upgrade unique indexes
    if (upgradeUniqueIndexes) {
        // A cmdObj with an empty collMod, i.e. nFields = 1, implies that it is a Unique Index
        // upgrade collMod.
        invariant(cmdObj.nFields() == 1);
        std::vector<std::string> indexNames;
        coll->getCatalogEntry()->getAllUniqueIndexes(opCtx, &indexNames);

        for (size_t i = 0; i < indexNames.size(); i++) {
            const IndexDescriptor* desc =
                coll->getIndexCatalog()->findIndexByName(opCtx, indexNames[i]);
            invariant(desc);

            // Update index metadata in storage engine.
            coll->getCatalogEntry()->updateIndexMetadata(opCtx, desc);

            // Refresh the in-memory instance of the index.
            desc = coll->getIndexCatalog()->refreshEntry(opCtx, desc);

            opCtx->recoveryUnit()->onRollback(
                [opCtx, desc, coll]() { coll->getIndexCatalog()->refreshEntry(opCtx, desc); });
        }
    }

    // Only observe non-view collMods, as view operations are observed as operations on the
    // system.views collection.
    getGlobalServiceContext()->getOpObserver()->onCollMod(
        opCtx, nss, coll->uuid(), oplogEntryBuilder.obj(), oldCollOptions, ttlInfo);

    wunit.commit();

    return Status::OK();
}
Example #27
        static bool runImpl(OperationContext* txn,
                            const string& dbname,
                            const string& ns,
                            const BSONObj& query,
                            const BSONObj& fields,
                            const BSONObj& update,
                            const BSONObj& sort,
                            bool upsert,
                            bool returnNew,
                            bool remove ,
                            BSONObjBuilder& result,
                            string& errmsg) {

            AutoGetOrCreateDb autoDb(txn, dbname, MODE_IX);
            Lock::CollectionLock collLock(txn->lockState(), ns, MODE_IX);
            Client::Context ctx(txn, ns, autoDb.getDb(), autoDb.justCreated());

            if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
                return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
                    << "Not primary while running findAndModify in " << ns));
            }

            Collection* collection = ctx.db()->getCollection(ns);

            const WhereCallbackReal whereCallback(txn, StringData(ns));

            if ( !collection ) {
                if ( !upsert ) {
                    // no collection and no upsert, so can't possibly do anything
                    _appendHelper( result, BSONObj(), false, fields, whereCallback );
                    return true;
                }
                // no collection, but upsert, so we want to create it
                // problem is we only have IX on db and collection :(
                // so we tell our caller who can do it
                errmsg = "no-collection";
                return false;
            }

            Snapshotted<BSONObj> snapshotDoc;
            RecordId loc;
            bool found = false;
            {
                CanonicalQuery* cq;
                const BSONObj projection;
                const long long skip = 0;
                const long long limit = -1; // 1 document requested; negative indicates hard limit.
                uassertStatusOK(CanonicalQuery::canonicalize(ns,
                                                             query,
                                                             sort,
                                                             projection,
                                                             skip,
                                                             limit,
                                                             &cq,
                                                             whereCallback));

                PlanExecutor* rawExec;
                uassertStatusOK(getExecutor(txn,
                                            collection,
                                            cq,
                                            PlanExecutor::YIELD_AUTO,
                                            &rawExec,
                                            QueryPlannerParams::DEFAULT));

                scoped_ptr<PlanExecutor> exec(rawExec);

                PlanExecutor::ExecState state = exec->getNextSnapshotted(&snapshotDoc, &loc);
                if (PlanExecutor::ADVANCED == state) {
                    found = true;
                }
                else if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
                    if (PlanExecutor::FAILURE == state &&
                        WorkingSetCommon::isValidStatusMemberObject(snapshotDoc.value())) {
                        const Status errorStatus =
                            WorkingSetCommon::getMemberObjectStatus(snapshotDoc.value());
                        invariant(!errorStatus.isOK());
                        uasserted(errorStatus.code(), errorStatus.reason());
                    }
                    uasserted(ErrorCodes::OperationFailed,
                              str::stream() << "executor returned " << PlanExecutor::statestr(state)
                                            << " while finding document to update");
                }
                else {
                    invariant(PlanExecutor::IS_EOF == state);
                }
            }

            WriteUnitOfWork wuow(txn);
            if (found) {
                // We found a doc, but it might not be associated with the active snapshot.
                // If the doc has changed or is no longer in the collection, we will throw a
                // write conflict exception and start again from the beginning.
                if (txn->recoveryUnit()->getSnapshotId() != snapshotDoc.snapshotId()) {
                    BSONObj oldObj = snapshotDoc.value();
                    if (!collection->findDoc(txn, loc, &snapshotDoc)) {
                        // Got deleted in the new snapshot.
                        throw WriteConflictException();
                    }

                    if (!oldObj.binaryEqual(snapshotDoc.value())) {
                        // Got updated in the new snapshot.
                        throw WriteConflictException();
                    }
                }

                // If we get here without throwing, then we should have the copy of the doc from
                // the latest snapshot.
                invariant(txn->recoveryUnit()->getSnapshotId() == snapshotDoc.snapshotId());
            }

            BSONObj doc = snapshotDoc.value();
            BSONObj queryModified = query;
            if (found && !doc["_id"].eoo() && !CanonicalQuery::isSimpleIdQuery(query)) {
                // we're going to re-write the query to be more efficient
                // we have to be a little careful because of positional operators
                // maybe we can pass this all through eventually, but right now isn't an easy way
                
                bool hasPositionalUpdate = false;
                {
                    // if the update has a positional piece ($)
                    // then we need to pull all query parts in
                    // so here we check for $
                    // a little hacky
                    BSONObjIterator i( update );
                    while ( i.more() ) {
                        const BSONElement& elem = i.next();
                        
                        if ( elem.fieldName()[0] != '$' || elem.type() != Object )
                            continue;

                        BSONObjIterator j( elem.Obj() );
                        while ( j.more() ) {
                            if ( str::contains( j.next().fieldName(), ".$" ) ) {
                                hasPositionalUpdate = true;
                                break;
                            }
                        }
                    }
                }

                BSONObjBuilder b(query.objsize() + 10);
                b.append( doc["_id"] );
                
                bool addedAtomic = false;

                BSONObjIterator i(query);
                while ( i.more() ) {
                    const BSONElement& elem = i.next();

                    if ( str::equals( "_id" , elem.fieldName() ) ) {
                        // we already do _id
                        continue;
                    }
                    
                    if ( ! hasPositionalUpdate ) {
                        // no positional ($) operator in the update, so the _id alone
                        // identifies the doc; skip the remaining query parts
                        continue;
                    }
                    
                    if ( ! addedAtomic ) {
                        b.appendBool( "$atomic" , true );
                        addedAtomic = true;
                    }

                    b.append( elem );
                }

                queryModified = b.obj();
            }
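            // Illustration of the rewrite above (values made up for this note):
            //   query: { name: "alice", status: "active" },  matched doc _id: ObjectId("...")
            //   - update has no positional ($) operator
            //       => queryModified: { _id: ObjectId("...") }
            //   - update is e.g. { $set: { "tags.$": "x" } } (positional)
            //       => queryModified: { _id: ObjectId("..."), $atomic: true, name: "alice", status: "active" }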

            if ( remove ) {
                _appendHelper(result, doc, found, fields, whereCallback);
                if ( found ) {
                    deleteObjects(txn, ctx.db(), ns, queryModified, PlanExecutor::YIELD_MANUAL,
                                  true, true);
                    BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
                    le.appendNumber( "n" , 1 );
                    le.done();
                }
            }
            else {
                // update
                if ( ! found && ! upsert ) {
                    // didn't have it, and am not upserting
                    _appendHelper(result, doc, found, fields, whereCallback);
                }
                else {
                    // we found it or we're updating
                    
                    if ( ! returnNew ) {
                        _appendHelper(result, doc, found, fields, whereCallback);
                    }
                    
                    const NamespaceString requestNs(ns);
                    UpdateRequest request(requestNs);

                    request.setQuery(queryModified);
                    request.setUpdates(update);
                    request.setUpsert(upsert);
                    request.setUpdateOpLog();
                    request.setStoreResultDoc(returnNew);

                    request.setYieldPolicy(PlanExecutor::YIELD_MANUAL);

                    // TODO(greg): We need to indicate whether we are ignoring
                    // the shard version below, but for now we do not.
                    UpdateLifecycleImpl updateLifecycle(false, requestNs);
                    request.setLifecycle(&updateLifecycle);
                    UpdateResult res = mongo::update(txn,
                                                     ctx.db(),
                                                     request,
                                                     &txn->getCurOp()->debug());

                    if (!found && res.existing) {
                        // No match was found during the read part of this find and modify, which
                        // means that we're here doing an upsert. But the update also told us that
                        // we modified an *already existing* document. This probably means that
                        // the query reported EOF based on an out-of-date snapshot. This should be
                        // a rare event, so we handle it by throwing a write conflict.
                        throw WriteConflictException();
                    }

                    if ( !collection ) {
                        // collection created by an upsert
                        collection = ctx.db()->getCollection(ns);
                    }

                    LOG(3) << "update result: "  << res ;
                    if (returnNew) {
                        dassert(!res.newObj.isEmpty());
                        _appendHelper(result, res.newObj, true, fields, whereCallback);
                    }

                    BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
                    le.appendBool( "updatedExisting" , res.existing );
                    le.appendNumber( "n" , res.numMatched );
                    if ( !res.upserted.isEmpty() ) {
                        le.append( res.upserted[kUpsertedFieldName] );
                    }
                    le.done();
                }
            }

            // Committing the WUOW can close the current snapshot. Until this happens, the
            // snapshot id should not have changed.
            if (found) {
                invariant(txn->recoveryUnit()->getSnapshotId() == snapshotDoc.snapshotId());
            }
            wuow.commit();

            return true;
        }
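The body above throws WriteConflictException at several points and relies on a caller that restarts the whole read-modify-write sequence when that happens. Below is a minimal standalone sketch of that retry pattern; ConflictException, attemptFindAndModify and kMaxAttempts are made-up names for illustration, not MongoDB's actual retry helpers.

    #include <iostream>
    #include <stdexcept>

    // Stand-in for mongo::WriteConflictException.
    struct ConflictException : std::runtime_error {
        ConflictException() : std::runtime_error("write conflict") {}
    };

    // Stand-in for the snapshot-sensitive read-modify-write body shown above;
    // here it simply succeeds on the third attempt.
    bool attemptFindAndModify(int attempt) {
        if (attempt < 2) {
            throw ConflictException();  // doc changed (or vanished) between read and write
        }
        return true;
    }

    int main() {
        const int kMaxAttempts = 10;  // illustrative bound; a real caller may loop until it succeeds
        for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
            try {
                if (attemptFindAndModify(attempt)) {
                    std::cout << "committed after " << (attempt + 1) << " attempt(s)\n";
                    break;
                }
            } catch (const ConflictException&) {
                // The snapshot moved underneath us: start over from the read.
                std::cout << "write conflict, retrying\n";
            }
        }
        return 0;
    }
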
Example #28
    Status WriteCmd::explain(OperationContext* txn,
                             const std::string& dbname,
                             const BSONObj& cmdObj,
                             ExplainCommon::Verbosity verbosity,
                             BSONObjBuilder* out) const {
        // For now we only explain update and delete write commands.
        if ( BatchedCommandRequest::BatchType_Update != _writeType &&
             BatchedCommandRequest::BatchType_Delete != _writeType ) {
            return Status( ErrorCodes::IllegalOperation,
                           "Only update and delete write ops can be explained" );
        }

        // Parse the batch request.
        BatchedCommandRequest request( _writeType );
        std::string errMsg;
        if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {
            return Status( ErrorCodes::FailedToParse, errMsg );
        }

        // Note that this is a runCommand, and therefore, the database and the collection name
        // are in different parts of the grammar for the command. But it's more convenient to
        // work with a NamespaceString. We build it here and replace it in the parsed command.
        // Internally, everything works with the namespace string as opposed to just the
        // collection name.
        NamespaceString nsString(dbname, request.getNS());
        request.setNSS(nsString);

        // Do the validation of the batch that is shared with non-explained write batches.
        Status isValid = WriteBatchExecutor::validateBatch( request );
        if (!isValid.isOK()) {
            return isValid;
        }

        // Explain must do one additional piece of validation: For now we only explain
        // singleton batches.
        if ( request.sizeWriteOps() != 1u ) {
            return Status( ErrorCodes::InvalidLength,
                           "explained write batches must be of size 1" );
        }

        // Get a reference to the singleton batch item (it's the 0th item in the batch).
        BatchItemRef batchItem( &request, 0 );

        if ( BatchedCommandRequest::BatchType_Update == _writeType ) {
            // Create the update request.
            UpdateRequest updateRequest( nsString );
            updateRequest.setQuery( batchItem.getUpdate()->getQuery() );
            updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
            updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
            updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
            updateRequest.setUpdateOpLog( true );
            UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
            updateRequest.setLifecycle( &updateLifecycle );
            updateRequest.setExplain();

            // Explained updates can yield.
            updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);

            OpDebug* debug = &txn->getCurOp()->debug();

            ParsedUpdate parsedUpdate( txn, &updateRequest );
            Status parseStatus = parsedUpdate.parseRequest();
            if ( !parseStatus.isOK() ) {
                return parseStatus;
            }

            // Explains of write commands are read-only, but we take write locks so
            // that timing info is more accurate.
            AutoGetDb autoDb( txn, nsString.db(), MODE_IX );
            Lock::CollectionLock colLock( txn->lockState(), nsString.ns(), MODE_IX );

            // We check the shard version explicitly here rather than using Client::Context,
            // as Context can do implicit database creation if the db does not exist. We want
            // explain to be a no-op that reports a trivial EOF plan against non-existent dbs
            // or collections.
            ensureShardVersionOKOrThrow( nsString.ns() );

            // Get a pointer to the (possibly NULL) collection.
            Collection* collection = NULL;
            if ( autoDb.getDb() ) {
                collection = autoDb.getDb()->getCollection( txn, nsString.ns() );
            }

            PlanExecutor* rawExec;
            uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
            boost::scoped_ptr<PlanExecutor> exec(rawExec);

            // Explain the plan tree.
            Explain::explainStages( exec.get(), verbosity, out );
            return Status::OK();
        }
        else {
            invariant( BatchedCommandRequest::BatchType_Delete == _writeType );

            // Create the delete request.
            DeleteRequest deleteRequest( nsString );
            deleteRequest.setQuery( batchItem.getDelete()->getQuery() );
            deleteRequest.setMulti( batchItem.getDelete()->getLimit() != 1 );
            deleteRequest.setUpdateOpLog(true);
            deleteRequest.setGod( false );
            deleteRequest.setExplain();

            // Explained deletes can yield.
            deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);

            ParsedDelete parsedDelete(txn, &deleteRequest);
            Status parseStatus = parsedDelete.parseRequest();
            if (!parseStatus.isOK()) {
                return parseStatus;
            }

            // Explains of write commands are read-only, but we take write locks so that timing
            // info is more accurate.
            AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
            Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);

            // We check the shard version explicitly here rather than using Client::Context,
            // as Context can do implicit database creation if the db does not exist. We want
            // explain to be a no-op that reports a trivial EOF plan against non-existent dbs
            // or collections.
            ensureShardVersionOKOrThrow( nsString.ns() );

            // Get a pointer to the (possibly NULL) collection.
            Collection* collection = NULL;
            if (autoDb.getDb()) {
                collection = autoDb.getDb()->getCollection(txn, nsString.ns());
            }

            PlanExecutor* rawExec;
            uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
            boost::scoped_ptr<PlanExecutor> exec(rawExec);

            // Explain the plan tree.
            Explain::explainStages(exec.get(), verbosity, out);
            return Status::OK();
        }
    }
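For orientation, here is a rough sketch of the command shape this explain path handles, built with MongoDB's BSON()/BSON_ARRAY stream macros. The collection name, the predicate, and the exact include path are assumptions made for this example; the outer { explain: ..., verbosity: ... } envelope is what the explain command is expected to unwrap before handing the inner update command (a single-item batch, per the size-1 check above) to WriteCmd::explain().

    #include "mongo/db/jsobj.h"  // pulls in the BSON() / BSON_ARRAY() stream macros (path approximate)

    namespace {

    // Builds an update command with exactly one write op, as the singleton-batch
    // validation above requires, wrapped in an explain envelope.
    mongo::BSONObj buildExplainableUpdateCommand() {
        mongo::BSONObj updateCmd =
            BSON("update" << "users"
                          << "updates"
                          << BSON_ARRAY(BSON("q" << BSON("status" << "inactive")
                                                 << "u" << BSON("$set" << BSON("archived" << true))
                                                 << "multi" << false
                                                 << "upsert" << false)));

        // Outer envelope sent by the client; the verbosity strings mirror
        // ExplainCommon::Verbosity ("queryPlanner", "executionStats", "allPlansExecution").
        return BSON("explain" << updateCmd << "verbosity" << "queryPlanner");
    }

    }  // namespace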