TEST(RecordStoreTestHarness, UpdateInPlace1) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    if (!rs->updateWithDamagesSupported())
        return;

    string s1 = "aaa111bbb";
    string s2 = "aaa222bbb";

    RecordId loc;
    const RecordData s1Rec(s1.c_str(), s1.size() + 1);
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), s1Rec.data(), s1Rec.size(), -1);
            ASSERT_OK(res.getStatus());
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(s1, rs->dataFor(opCtx.get(), loc).data());
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            const char* damageSource = "222";
            mutablebson::DamageVector dv;
            dv.push_back(mutablebson::DamageEvent());
            dv[0].sourceOffset = 0;
            dv[0].targetOffset = 3;
            dv[0].size = 3;

            auto newRecStatus = rs->updateWithDamages(opCtx.get(), loc, s1Rec, damageSource, dv);
            ASSERT_OK(newRecStatus.getStatus());
            ASSERT_EQUALS(s2, newRecStatus.getValue().data());
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(s2, rs->dataFor(opCtx.get(), loc).data());
    }
}
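// Returns the on-disk size reported by WiredTiger for 'uri'; if the ident no longer exists,
// its size is reported as 0.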
int64_t WiredTigerUtil::getIdentSize(WT_SESSION* s, const std::string& uri) {
    StatusWith<int64_t> result = WiredTigerUtil::getStatisticsValueAs<int64_t>(
        s, "statistics:" + uri, "statistics=(size)", WT_STAT_DSRC_BLOCK_SIZE);
    const Status& status = result.getStatus();
    if (!status.isOK()) {
        if (status.code() == ErrorCodes::CursorNotFound) {
            // Ident is gone, so report its size as 0.
            return 0;
        }
        uassertStatusOK(status);
    }
    return result.getValue();
}
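// Starts a vote requester round for term 'newTerm' and arranges for _onVoteRequestComplete to run
// once the round finishes; returns quietly if the executor is shutting down.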
void ReplicationCoordinatorImpl::_startVoteRequester(long long newTerm) {
    invariant(_voteRequester);
    LoseElectionGuardV1 lossGuard(this);

    LockGuard lk(_topoMutex);

    const auto lastOpTime =
        _isDurableStorageEngine() ? getMyLastDurableOpTime() : getMyLastAppliedOpTime();

    _voteRequester.reset(new VoteRequester);
    StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
        &_replExecutor, _rsConfig, _selfIndex, _topCoord->getTerm(), false, lastOpTime);
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28643, nextPhaseEvh.getStatus());
    _replExecutor.onEvent(
        nextPhaseEvh.getValue(),
        stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm));

    lossGuard.dismiss();
}
TEST(WiredTigerUtilTest, GetStatisticsValueValidKey) {
    WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
    WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
    WiredTigerSession* session = recoveryUnit.getSession(NULL);
    WT_SESSION* wtSession = session->getSession();
    ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
    // Use a data-source statistics key (LSM chunk count) that does not apply to this table type.
    StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
        "statistics:table:mytable", "statistics=(fast)", WT_STAT_DSRC_LSM_CHUNK_COUNT);
    ASSERT_OK(result.getStatus());
    // Expect statistics value to be zero for an LSM key on a Btree.
    ASSERT_EQUALS(0U, result.getValue());
}
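    // For engines that support document-level locking, derive the oplog RecordId for 'opTime'
    // and register it as uncommitted with the id tracker.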
    Status KVRecordStoreCapped::oplogDiskLocRegister(OperationContext* txn,
                                                     const OpTime& opTime) {
        if (!_engineSupportsDocLocking) {
            return Status::OK();
        }

        StatusWith<RecordId> loc = oploghack::keyForOptime( opTime );
        if ( !loc.isOK() )
            return loc.getStatus();

        _idTracker->addUncommittedId(txn, loc.getValue());
        return Status::OK();
    }
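// Runs one step of the client-side SASL conversation; marks the session done when the
// conversation reports completion.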
Status NativeSaslClientSession::step(const StringData& inputData, std::string* outputData) {
    if (!_saslConversation) {
        return Status(ErrorCodes::BadValue,
                      mongoutils::str::stream()
                          << "The client authentication session has not been properly initialized");
    }

    StatusWith<bool> status = _saslConversation->step(inputData, outputData);
    if (status.isOK()) {
        _done = status.getValue();
    }
    return status.getStatus();
}
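    // Insert a record, verify it can be read back, then truncate() the record store and verify
    // it is empty again.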
    TEST( RecordStoreTestHarness, Truncate1 ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        string s = "eliot was here";

        DiskLoc loc;
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }

        }


        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc ).data() );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                rs->truncate( opCtx.get() );
                uow.commit();
            }

        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

    }
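// Registers a canned response (or error status) for 'request'; returns false if a response was
// already mapped for that request.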
bool NetworkInterfaceMockWithMap::addResponse(
        const ReplicationExecutor::RemoteCommandRequest& request,
        const StatusWith<BSONObj>& response,
        bool isBlocked) {
    boost::lock_guard<boost::mutex> lk(_mutex);
    return _responses.insert(std::make_pair(
                                 request,
                                 BlockableResponseStatus(
                                     !response.isOK()
                                         ? ResponseStatus(response.getStatus())
                                         : ResponseStatus(Response(response.getValue(),
                                                                   Milliseconds(0))),
                                     isBlocked))).second;
}
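// Parses the requested validation action and persists it on the collection's catalog entry;
// the caller must hold the collection X lock.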
Status Collection::setValidationAction(OperationContext* txn, StringData newAction) {
    invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));

    StatusWith<ValidationAction> status = _parseValidationAction(newAction);
    if (!status.isOK()) {
        return status.getStatus();
    }

    _validationAction = status.getValue();

    _details->updateValidator(txn, _validatorDoc, getValidationLevel(), getValidationAction());

    return Status::OK();
}
    // Insert a record and try to call updateWithDamages() with an empty DamageVector.
    TEST( RecordStoreTestHarness, UpdateWithNoDamages ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        string data = "my record";
        DiskLoc loc;
        const RecordData rec(data.c_str(), data.size() + 1);
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                            rec.data(),
                                                            rec.size(),
                                                            false );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                mutablebson::DamageVector dv;

                WriteUnitOfWork uow( opCtx.get() );
                ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) );
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                RecordData record = rs->dataFor( opCtx.get(), loc );
                ASSERT_EQUALS( data, record.data() );
            }
        }
    }
// Insert multiple records and create an iterator for repairing the record store,
// even though it has not been corrupted.
TEST(RecordStoreTestHarness, GetIteratorForRepairNonEmpty) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    const int nToInsert = 10;
    RecordId locs[nToInsert];
    for (int i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    set<RecordId> remain(locs, locs + nToInsert);
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        auto cursor = rs->getCursorForRepair(opCtx.get());
        // returns NULL if getCursorForRepair is not supported
        if (!cursor) {
            return;
        }

        while (auto record = cursor->next()) {
            remain.erase(record->id);  // can happen more than once per doc
        }
        ASSERT(remain.empty());

        ASSERT(!cursor->next());
    }
}
    ResponseStatus NetworkInterfaceMock::runCommand(
            const ReplicationExecutor::RemoteCommandRequest& request) {
        boost::unique_lock<boost::mutex> lk(_mutex);
        // Block until the simulated clock has advanced past the simulated network latency.
        Date_t wakeupTime = _now + _simulatedNetworkLatencyMillis;
        while (_now < wakeupTime) {
            _timeElapsed.wait(lk);
        }

        StatusWith<int> toStatus = getTimeoutMillis(request.expirationDate, _now);
        if (!toStatus.isOK())
            return ResponseStatus(toStatus.getStatus());

        lk.unlock();
        return _helper(request);
    }
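// Schedules the scatter-gather algorithm on the executor and blocks until its completion event
// is signaled.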
Status ScatterGatherRunner::run(ReplicationExecutor* executor) {
    StatusWith<ReplicationExecutor::EventHandle> finishEvh(ErrorCodes::InternalError, "Not set");
    StatusWith<ReplicationExecutor::CallbackHandle> startCBH = executor->scheduleWork(
        stdx::bind(startTrampoline, stdx::placeholders::_1, this, &finishEvh));
    if (!startCBH.isOK()) {
        return startCBH.getStatus();
    }
    executor->wait(startCBH.getValue());
    if (!finishEvh.isOK()) {
        return finishEvh.getStatus();
    }
    executor->waitForEvent(finishEvh.getValue());
    return Status::OK();
}
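    // Rocks-specific in-place update test: insert a record, patch three bytes with
    // updateWithDamages(), and verify the new contents.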
    TEST( RocksRecordStoreTest, UpdateInPlace1 ) {
        unittest::TempDir td( _rocksRecordStoreTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

        {
            RocksRecordStore rs( "foo.bar", db.get(),
                                 db->DefaultColumnFamily(),
                                 db->DefaultColumnFamily() );
            string s1 = "aaa111bbb";
            string s2 = "aaa222bbb";

            DiskLoc loc;
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    StatusWith<DiskLoc> res = rs.insertRecord( &opCtx,
                                                               s1.c_str(),
                                                               s1.size() + 1,
                                                               -1 );
                    ASSERT_OK( res.getStatus() );
                    loc = res.getValue();
                }

                ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
            }

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    const char* damageSource = "222";
                    mutablebson::DamageVector dv;
                    dv.push_back( mutablebson::DamageEvent() );
                    dv[0].sourceOffset = 0;
                    dv[0].targetOffset = 3;
                    dv[0].size = 3;
                    Status res = rs.updateWithDamages( &opCtx,
                                                       loc,
                                                       damageSource,
                                                       dv );
                    ASSERT_OK( res );
                }
                ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
            }

        }
    }
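    // applyOps command entry point: validates that 'ops' is an array of well-formed operations,
    // applies them, and then waits for the requested write concern.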
    virtual bool run(OperationContext* txn,
                     const string& dbname,
                     BSONObj& cmdObj,
                     int,
                     string& errmsg,
                     BSONObjBuilder& result) {
        boost::optional<DisableDocumentValidation> maybeDisableValidation;
        if (shouldBypassDocumentValidationForCommand(cmdObj))
            maybeDisableValidation.emplace(txn);

        if (cmdObj.firstElement().type() != Array) {
            errmsg = "ops has to be an array";
            return false;
        }

        BSONObj ops = cmdObj.firstElement().Obj();

        {
            // check input
            BSONObjIterator i(ops);
            while (i.more()) {
                BSONElement e = i.next();
                if (!_checkOperation(e, errmsg)) {
                    return false;
                }
            }
        }

        StatusWith<WriteConcernOptions> wcResult = extractWriteConcern(txn, cmdObj, dbname);
        if (!wcResult.isOK()) {
            return appendCommandStatus(result, wcResult.getStatus());
        }
        txn->setWriteConcern(wcResult.getValue());
        setupSynchronousCommit(txn);

        auto applyOpsStatus = appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));

        WriteConcernResult res;
        auto waitForWCStatus =
            waitForWriteConcern(txn,
                                repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
                                txn->getWriteConcern(),
                                &res);
        appendCommandWCStatus(result, waitForWCStatus);

        return applyOpsStatus;
    }
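    // Creates the WiredTiger table backing a new record store, using a create-config string
    // generated from the collection options.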
    Status WiredTigerKVEngine::createRecordStore( OperationContext* opCtx,
                                                  const StringData& ns,
                                                  const StringData& ident,
                                                  const CollectionOptions& options ) {
        WiredTigerSession session( _conn, -1 );

        StatusWith<std::string> result = WiredTigerRecordStore::generateCreateString(ns, options, _rsOptions);
        if (!result.isOK()) {
            return result.getStatus();
        }
        std::string config = result.getValue();

        string uri = _uri( ident );
        WT_SESSION* s = session.getSession();
        LOG(1) << "WiredTigerKVEngine::createRecordStore uri: " << uri << " config: " << config;
        return wtRCToStatus( s->create( s, uri.c_str(), config.c_str() ) );
    }
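    // Rewrites the catalog document for 'ns': keeps existing index idents and generates fresh
    // idents for indexes that do not have one yet.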
    void KVCatalog::putMetaData( OperationContext* opCtx,
                                 const StringData& ns,
                                 BSONCollectionCatalogEntry::MetaData& md ) {
        DiskLoc loc;
        BSONObj obj = _findEntry( opCtx, ns, &loc );

        {
            // rebuilt doc
            BSONObjBuilder b;
            b.append( "md", md.toBSON() );

            BSONObjBuilder newIdentMap;
            BSONObj oldIdentMap;
            if ( obj["idxIdent"].isABSONObj() )
                oldIdentMap = obj["idxIdent"].Obj();

            // fix ident map
            for ( size_t i = 0; i < md.indexes.size(); i++ ) {
                string name = md.indexes[i].name();
                BSONElement e = oldIdentMap[name];
                if ( e.type() == String ) {
                    newIdentMap.append( e );
                    continue;
                }
                // missing, create new
                std::stringstream ss;
                ss << getCollectionIdent( ns ) << '$' << name
                   << '-' << _rand << '-' << _next.fetchAndAdd( 1 );
                newIdentMap.append( name, ss.str() );
            }
            b.append( "idxIdent", newIdentMap.obj() );

            // add whatever is left
            b.appendElementsUnique( obj );
            obj = b.obj();
        }

        StatusWith<DiskLoc> status = _rs->updateRecord( opCtx,
                                                        loc,
                                                        obj.objdata(),
                                                        obj.objsize(),
                                                        false,
                                                        NULL );
        fassert( 28521, status.getStatus() );
        invariant( status.getValue() == loc );
    }
    // Insert multiple records, and verify that calling truncate() on a nonempty collection
    // removes all of them and returns an OK status.
    TEST( RecordStoreTestHarness, TruncateNonEmpty ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        int nToInsert = 10;
        for ( int i = 0; i < nToInsert; i++ ) {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                stringstream ss;
                ss << "record " << i;
                string data = ss.str();

                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                            data.c_str(),
                                                            data.size() + 1,
                                                            false );
                ASSERT_OK( res.getStatus() );
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                ASSERT_OK( rs->truncate( opCtx.get() ) );
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }
    }
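// Reads the cluster key file, derives password and SCRAM-SHA-1 credentials for the internal
// user, and configures internal authentication when keyfile cluster auth is in use.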
bool setUpSecurityKey(const string& filename) {
    StatusWith<std::string> keyString = mongo::readSecurityFile(filename);
    if (!keyString.isOK()) {
        log() << keyString.getStatus().reason();
        return false;
    }

    std::string str = std::move(keyString.getValue());
    const unsigned long long keyLength = str.size();
    if (keyLength < 6 || keyLength > 1024) {
        log() << " security key in " << filename << " has length " << keyLength
              << ", must be between 6 and 1024 chars";
        return false;
    }

    // Generate MONGODB-CR and SCRAM credentials for the internal user based on
    // the keyfile.
    User::CredentialData credentials;
    credentials.password =
        mongo::createPasswordDigest(internalSecurity.user->getName().getUser().toString(), str);

    BSONObj creds =
        scram::generateCredentials(credentials.password, saslGlobalParams.scramIterationCount);
    credentials.scram.iterationCount = creds[scram::iterationCountFieldName].Int();
    credentials.scram.salt = creds[scram::saltFieldName].String();
    credentials.scram.storedKey = creds[scram::storedKeyFieldName].String();
    credentials.scram.serverKey = creds[scram::serverKeyFieldName].String();

    internalSecurity.user->setCredentials(credentials);

    int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
    if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile ||
        clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
        setInternalUserAuthParams(
            BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" << saslCommandUserDBFieldName
                                               << internalSecurity.user->getName().getDB()
                                               << saslCommandUserFieldName
                                               << internalSecurity.user->getName().getUser()
                                               << saslCommandPasswordFieldName
                                               << credentials.password
                                               << saslCommandDigestPasswordFieldName
                                               << false));
    }

    return true;
}
// Insert multiple records and try to delete them.
TEST(RecordStoreTestHarness, DeleteMultipleRecords) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    const int nToInsert = 10;
    RecordId locs[nToInsert];
    for (int i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    for (int i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            rs->deleteRecord(opCtx.get(), locs[i]);
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }
}
void KVCatalog::putMetaData(OperationContext* opCtx,
                            StringData ns,
                            BSONCollectionCatalogEntry::MetaData& md) {
    std::unique_ptr<Lock::ResourceLock> rLk;
    if (!_isRsThreadSafe && opCtx->lockState()) {
        rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
    }

    RecordId loc;
    BSONObj obj = _findEntry(opCtx, ns, &loc);

    {
        // rebuilt doc
        BSONObjBuilder b;
        b.append("md", md.toBSON());

        BSONObjBuilder newIdentMap;
        BSONObj oldIdentMap;
        if (obj["idxIdent"].isABSONObj())
            oldIdentMap = obj["idxIdent"].Obj();

        // fix ident map
        for (size_t i = 0; i < md.indexes.size(); i++) {
            string name = md.indexes[i].name();
            BSONElement e = oldIdentMap[name];
            if (e.type() == String) {
                newIdentMap.append(e);
                continue;
            }
            // missing, create new
            newIdentMap.append(name, _newUniqueIdent(ns, "index"));
        }
        b.append("idxIdent", newIdentMap.obj());

        // add whatever is left
        b.appendElementsUnique(obj);
        obj = b.obj();
    }

    LOG(3) << "recording new metadata: " << obj;
    StatusWith<RecordId> status =
        _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
    fassert(28521, status.getStatus());
    invariant(status.getValue() == loc);
}
// Create multiple iterators over a nonempty record store.
TEST(RecordStoreTestHarness, GetManyIteratorsNonEmpty) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    const int nToInsert = 10;
    RecordId locs[nToInsert];
    for (int i = 0; i < nToInsert; i++) {
        ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    set<RecordId> remain(locs, locs + nToInsert);
    {
        ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
        for (auto&& cursor : rs->getManyCursors(opCtx.get())) {
            while (auto record = cursor->next()) {
                ASSERT_EQ(remain.erase(record->id), size_t(1));
            }

            ASSERT(!cursor->next());
        }
        ASSERT(remain.empty());
    }
}
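    // Fetcher callback for listIndexes: accumulates index specs across getMore batches and,
    // after the last batch, schedules collection creation on the database worker thread.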
    void CollectionCloner::_listIndexesCallback(const StatusWith<Fetcher::BatchData>& fetchResult,
                                                Fetcher::NextAction* nextAction,
                                                BSONObjBuilder* getMoreBob) {
        boost::lock_guard<boost::mutex> lk(_mutex);

        _active = false;

        if (!fetchResult.isOK()) {
            _work(fetchResult.getStatus());
            return;
        }

        auto batchData(fetchResult.getValue());
        auto&& documents = batchData.documents;

        if (documents.empty()) {
            warning() << "No indexes found for collection " <<  _sourceNss.ns()
                      << " while cloning from " << _source;
        }

        // We may be called with multiple batches leading to a need to grow _indexSpecs.
        _indexSpecs.reserve(_indexSpecs.size() + documents.size());
        _indexSpecs.insert(_indexSpecs.end(), documents.begin(), documents.end());

        // The fetcher will continue to call with kGetMore until an error or the last batch.
        if (*nextAction == Fetcher::NextAction::kGetMore) {
            invariant(getMoreBob);
            getMoreBob->append("getMore", batchData.cursorId);
            getMoreBob->append("collection", batchData.nss.coll());

            _active = true;
            return;
        }

        // We have all of the indexes now, so we can start cloning the collection data.
        auto&& scheduleResult = _scheduleDbWorkFn(
            stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1));
        if (!scheduleResult.isOK()) {
            _work(scheduleResult.getStatus());
            return;
        }

        _active = true;
        _dbWorkCallbackHandle = scheduleResult.getValue();
    }
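// Rejects records that cannot fit in a capped store, then copies each record into the in-memory
// map, assigning an oplog-derived or freshly allocated RecordId.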
Status EphemeralForTestRecordStore::insertRecords(OperationContext* opCtx,
                                                  std::vector<Record>* inOutRecords,
                                                  const std::vector<Timestamp>& timestamps) {

    for (auto& record : *inOutRecords) {
        if (_isCapped && record.data.size() > _cappedMaxSize) {
            // We use dataSize for capped rollover and we don't want to delete everything if we know
            // this won't fit.
            return Status(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
        }
    }
    const auto insertSingleFn = [this, opCtx](Record* record) {
        stdx::lock_guard<stdx::recursive_mutex> lock(_data->recordsMutex);
        EphemeralForTestRecord rec(record->data.size());
        memcpy(rec.data.get(), record->data.data(), record->data.size());

        RecordId loc;
        if (_data->isOplog) {
            StatusWith<RecordId> status =
                extractAndCheckLocForOplog(record->data.data(), record->data.size());
            if (!status.isOK())
                return status.getStatus();
            loc = status.getValue();
        } else {
            loc = allocateLoc();
        }

        _data->dataSize += record->data.size();
        _data->records[loc] = rec;
        record->id = loc;

        opCtx->recoveryUnit()->registerChange(new InsertChange(opCtx, _data, loc));
        cappedDeleteAsNeeded_inlock(opCtx);

        return Status::OK();
    };

    for (auto& record : *inOutRecords) {
        auto status = insertSingleFn(&record);
        if (!status.isOK())
            return status;
    }

    return Status::OK();
}
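    // Rocks engine restart test: create a collection, insert a record, shut down cleanly,
    // reopen the engine, and verify the record is still readable.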
    TEST( RocksEngineTest, Restart1 ) {
        std::string path = "/tmp/mongo-rocks-engine-test";
        boost::filesystem::remove_all( path );

        string s = "eliot was here";
        DiskLoc loc;

        {
            RocksEngine engine( path );

            {
                MyOperationContext opCtx( &engine );
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                Status status = engine.createCollection( &opCtx,
                                                     "test.foo",
                                                     CollectionOptions() );
                ASSERT_OK( status );
                uow.commit();
            }

            RocksRecordStore* rs = engine.getEntry( "test.foo" )->recordStore.get();

            {
                MyOperationContext opCtx( &engine );

                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
                    ASSERT_OK( res.getStatus() );
                    loc = res.getValue();
                    uow.commit();
                }

                ASSERT_EQUALS( s, rs->dataFor( loc ).data() );
                engine.cleanShutdown( &opCtx );
            }
        }

        {
            RocksEngine engine( path );
            RocksRecordStore* rs = engine.getEntry( "test.foo" )->recordStore.get();
            ASSERT_EQUALS( s, rs->dataFor( loc ).data() );
        }

    }
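// Best-effort recovery of documents from the interim FTDC file left behind by an unclean
// shutdown; read errors are logged and otherwise ignored.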
std::vector<std::tuple<FTDCBSONUtil::FTDCType, BSONObj, Date_t>>
FTDCFileManager::recoverInterimFile() {
    decltype(recoverInterimFile()) docs;

    auto interimFile = FTDCUtil::getInterimFile(_path);

    // Nothing to do if it does not exist
    if (!boost::filesystem::exists(interimFile)) {
        return docs;
    }

    size_t size = boost::filesystem::file_size(interimFile);
    if (size == 0) {
        return docs;
    }

    FTDCFileReader read;
    auto s = read.open(interimFile);
    if (!s.isOK()) {
        log() << "Unclean full-time diagnostic data capture shutdown detected, found interim file, "
                 "but failed "
                 "to open it, some "
                 "metrics may have been lost. " << s;

        // Note: We ignore any actual errors as reading from the interim files is a best-effort
        return docs;
    }

    StatusWith<bool> m = read.hasNext();
    for (; m.isOK() && m.getValue(); m = read.hasNext()) {
        auto triplet = read.next();
        docs.emplace_back(std::tuple<FTDCBSONUtil::FTDCType, BSONObj, Date_t>(
            std::get<0>(triplet), std::get<1>(triplet).getOwned(), std::get<2>(triplet)));
    }

    // Warn if the interim file was corrupt or we had an unclean shutdown
    if (!m.isOK() || !docs.empty()) {
        log() << "Unclean full-time diagnostic data capture shutdown detected, found interim file, "
                 "some "
                 "metrics may have been lost. " << m.getStatus();
    }

    // Note: We ignore any actual errors as reading from the interim files is a best-effort
    return docs;
}
// Insert a single record. Create a repair iterator pointing to that single record.
// Then invalidate the record and ensure that the repair iterator responds correctly.
// See SERVER-16300.
TEST(RecordStoreTestHarness, GetIteratorForRepairInvalidateSingleton) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQ(0, rs->numRecords(opCtx.get()));
    }

    // Insert one record.
    RecordId idToInvalidate;
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        WriteUnitOfWork uow(opCtx.get());
        StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, false);
        ASSERT_OK(res.getStatus());
        idToInvalidate = res.getValue();
        uow.commit();
    }

    // Double-check that the record store has one record in it now.
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQ(1, rs->numRecords(opCtx.get()));
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        auto cursor = rs->getCursorForRepair(opCtx.get());
        // returns NULL if getCursorForRepair is not supported
        if (!cursor) {
            return;
        }

        // We should be pointing at the only record in the store.

        // Invalidate the record we're pointing at.
        cursor->save();
        cursor->invalidate(opCtx.get(), idToInvalidate);
        cursor->restore();

        // Iterator should be EOF now because the only thing in the collection got deleted.
        ASSERT(!cursor->next());
    }
}
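// Rewrites the catalog entry under the new namespace and moves the ident mapping, registering
// recovery-unit changes so a rollback restores the previous mapping.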
Status KVCatalog::renameCollection(OperationContext* opCtx,
                                   StringData fromNS,
                                   StringData toNS,
                                   bool stayTemp) {
    std::unique_ptr<Lock::ResourceLock> rLk;
    if (!_isRsThreadSafe && opCtx->lockState()) {
        rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
    }

    RecordId loc;
    BSONObj old = _findEntry(opCtx, fromNS, &loc).getOwned();
    {
        BSONObjBuilder b;

        b.append("ns", toNS);

        BSONCollectionCatalogEntry::MetaData md;
        md.parse(old["md"].Obj());
        md.rename(toNS);
        if (!stayTemp)
            md.options.temp = false;
        b.append("md", md.toBSON());

        b.appendElementsUnique(old);

        BSONObj obj = b.obj();
        StatusWith<RecordId> status =
            _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
        fassert(28522, status.getStatus());
        invariant(status.getValue() == loc);
    }

    stdx::lock_guard<stdx::mutex> lk(_identsLock);
    const NSToIdentMap::iterator fromIt = _idents.find(fromNS.toString());
    invariant(fromIt != _idents.end());

    opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, fromNS, fromIt->second));
    opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, toNS));

    _idents.erase(fromIt);
    _idents[toNS.toString()] = Entry(old["ident"].String(), loc);

    return Status::OK();
}
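// Schedules the command on the task executor, blocks until it completes, and extracts the last
// committed opTime from the replication metadata when present.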
StatusWith<ShardRegistry::CommandResponse> ShardRegistry::_runCommandWithMetadata(
    TaskExecutor* executor,
    const HostAndPort& host,
    const std::string& dbName,
    const BSONObj& cmdObj,
    const BSONObj& metadata) {
    StatusWith<executor::RemoteCommandResponse> responseStatus =
        Status(ErrorCodes::InternalError, "Internal error running command");

    executor::RemoteCommandRequest request(host, dbName, cmdObj, metadata, kConfigCommandTimeout);
    auto callStatus =
        executor->scheduleRemoteCommand(request,
                                        [&responseStatus](const RemoteCommandCallbackArgs& args) {
                                            responseStatus = args.response;
                                        });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }

    // Block until the command is carried out
    executor->wait(callStatus.getValue());

    if (!responseStatus.isOK()) {
        return responseStatus.getStatus();
    }

    auto response = responseStatus.getValue();

    CommandResponse cmdResponse;
    cmdResponse.response = response.data;

    if (response.metadata.hasField(rpc::kReplSetMetadataFieldName)) {
        auto replParseStatus = rpc::ReplSetMetadata::readFromMetadata(response.metadata);

        if (!replParseStatus.isOK()) {
            return replParseStatus.getStatus();
        }

        // TODO: SERVER-19734 use config server snapshot time.
        cmdResponse.opTime = replParseStatus.getValue().getLastOpCommitted();
    }

    return cmdResponse;
}
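// Insert a record, restart the storage engine, and verify the record is still readable
// afterwards (skipped when the engine does not support restart).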
TEST(KVEngineTestHarness, Restart1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();
    ASSERT(engine);

    string ns = "a.b";

    // 'loc' holds location of "abc" and is referenced after restarting engine.
    RecordId loc;
    {
        unique_ptr<RecordStore> rs;
        {
            MyOperationContext opCtx(engine);
            ASSERT_OK(engine->createRecordStore(&opCtx, ns, ns, CollectionOptions()));
            rs.reset(engine->getRecordStore(&opCtx, ns, ns, CollectionOptions()));
            ASSERT(rs);
        }

        {
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);
            StatusWith<RecordId> res = rs->insertRecord(&opCtx, "abc", 4, false);
            ASSERT_OK(res.getStatus());
            loc = res.getValue();
            uow.commit();
        }

        {
            MyOperationContext opCtx(engine);
            ASSERT_EQUALS(string("abc"), rs->dataFor(&opCtx, loc).data());
        }
    }

    // returns null if the engine does not support restart (transient, test-only engines)
    engine = helper->restartEngine();

    if (engine != NULL) {
        unique_ptr<RecordStore> rs;
        MyOperationContext opCtx(engine);
        rs.reset(engine->getRecordStore(&opCtx, ns, ns, CollectionOptions()));
        ASSERT_EQUALS(string("abc"), rs->dataFor(&opCtx, loc).data());
    }
}