Example #1
void setGlobalTimestamp(const Timestamp& newTime) {
    stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
    globalTimestamp = newTime;
}
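
A minimal self-contained sketch pairing this setter with the matching getter from Example #14, so both accessors serialize on the same mutex. The Timestamp stand-in and plain std:: types are illustrative; the originals use MongoDB's stdx wrappers:

#include <mutex>

struct Timestamp { long long value = 0; };  // illustrative stand-in

static std::mutex globalTimestampMutex;
static Timestamp globalTimestamp;

// Both accessors take the same mutex, so a reader can never observe a torn write.
void setGlobalTimestamp(const Timestamp& newTime) {
    std::lock_guard<std::mutex> lk(globalTimestampMutex);
    globalTimestamp = newTime;
}

Timestamp getLastSetTimestamp() {
    std::lock_guard<std::mutex> lk(globalTimestampMutex);
    return globalTimestamp;
}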
Example #2
void LoadAs(const std::string& filename)
{
	if (!Core::IsRunning())
		return;

	// Stop the core while we load the state
	bool wasUnpaused = Core::PauseAndLock(true);

	g_loadDepth++;

	// Save temp buffer for undo load state
	if (!Movie::IsJustStartingRecordingInputFromSaveState())
	{
		std::lock_guard<std::mutex> lk(g_cs_undo_load_buffer);
		SaveToBuffer(g_undo_load_buffer);
		if (Movie::IsMovieActive())
			Movie::SaveRecording(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm");
		else if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm"))
			File::Delete(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm");
	}

	bool loaded = false;
	bool loadedSuccessfully = false;
	std::string version_created_by;

	// The braces scope the buffer so it is freed as soon as possible
	{
		std::vector<u8> buffer;
		LoadFileStateData(filename, buffer);

		if (!buffer.empty())
		{
			u8 *ptr = &buffer[0];
			PointerWrap p(&ptr, PointerWrap::MODE_READ);
			version_created_by = DoState(p);
			loaded = true;
			loadedSuccessfully = (p.GetMode() == PointerWrap::MODE_READ);
		}
	}

	if (loaded)
	{
		if (loadedSuccessfully)
		{
			Core::DisplayMessage(StringFromFormat("Loaded state from %s", filename.c_str()), 2000);
			if (File::Exists(filename + ".dtm"))
				Movie::LoadInput(filename + ".dtm");
			else if (!Movie::IsJustStartingRecordingInputFromSaveState() && !Movie::IsJustStartingPlayingInputFromSaveState())
				Movie::EndPlayInput(false);
		}
		else
		{
			// failed to load
			Core::DisplayMessage("Unable to load: Can't load state from other versions!", 4000);
			if (!version_created_by.empty())
				Core::DisplayMessage("The savestate was created using " + version_created_by, 4000);

			// since we could be in an inconsistent state now (and might crash or whatever), undo.
			if (g_loadDepth < 2)
				UndoLoadState();
		}
	}

	if (g_onAfterLoadCb)
		g_onAfterLoadCb();

	g_loadDepth--;

	// Resume the core
	Core::PauseAndLock(false, wasUnpaused);
}
Example #3
File: config.cpp Project: gilles/mongo
 void DBConfig::setPrimary( string s ){
     scoped_lock lk( _lock );
     _primary.reset( s );
     _save();
 }
Example #4
    void* MemoryMappedFile::map(const char *filenameIn, unsigned long long &length, int options) {
        verify( fd == 0 && len == 0 ); // can't open more than once
        setFilename(filenameIn);
        FileAllocator::get()->allocateAsap( filenameIn, length );
        /* big hack here: Babble uses db names with colons.  doesn't seem to work on windows.  temporary perhaps. */
        char filename[256];
        strncpy(filename, filenameIn, 255);
        filename[255] = 0;
        {
            size_t len = strlen( filename );
            // Walk backwards to the last path separator. A signed index is
            // required: with unsigned size_t, i >= 0 is always true and the
            // loop would underflow past the start of the string.
            for ( ptrdiff_t i = (ptrdiff_t)len - 1; i >= 0; i-- ) {
                if ( filename[i] == '/' ||
                        filename[i] == '\\' )
                    break;

                if ( filename[i] == ':' )
                    filename[i] = '_';
            }
        }

        updateLength( filename, length );

        {
            DWORD createOptions = FILE_ATTRIBUTE_NORMAL;
            if ( options & SEQUENTIAL )
                createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;
            DWORD rw = GENERIC_READ | GENERIC_WRITE;
            fd = CreateFileW(
                     toWideString(filename).c_str(),
                     rw, // desired access
                     FILE_SHARE_WRITE | FILE_SHARE_READ, // share mode
                     NULL, // security
                     OPEN_ALWAYS, // create disposition
                     createOptions , // flags
                     NULL); // hTempl
            if ( fd == INVALID_HANDLE_VALUE ) {
                DWORD dosError = GetLastError();
                log() << "CreateFileW for " << filename
                        << " failed with " << errnoWithDescription( dosError )
                        << " (file size is " << length << ")"
                        << " in MemoryMappedFile::map"
                        << endl;
                return 0;
            }
        }

        mapped += length;

        {
            DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;
            maphandle = CreateFileMappingW(fd, NULL, flProtect,
                                          length >> 32 /*maxsizehigh*/,
                                          (unsigned) length /*maxsizelow*/,
                                          NULL/*lpName*/);
            if ( maphandle == NULL ) {
                DWORD dosError = GetLastError();
                log() << "CreateFileMappingW for " << filename
                        << " failed with " << errnoWithDescription( dosError )
                        << " (file size is " << length << ")"
                        << " in MemoryMappedFile::map"
                        << endl;
                close();
                fassertFailed( 16225 );
            }
        }

        void *view = 0;
        {
            scoped_lock lk(mapViewMutex);
            DWORD access = ( options & READONLY ) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
            LPVOID thisAddress = getNextMemoryMappedFileLocation( length );
            view = MapViewOfFileEx(
                    maphandle,      // file mapping handle
                    access,         // access
                    0, 0,           // file offset, high and low
                    0,              // bytes to map, 0 == all
                    thisAddress );  // address to place file
            if ( view == 0 ) {
                DWORD dosError = GetLastError();
                log() << "MapViewOfFileEx for " << filename
                        << " failed with " << errnoWithDescription( dosError )
                        << " (file size is " << length << ")"
                        << " in MemoryMappedFile::map"
                        << endl;
                close();
                fassertFailed( 16166 );
            }
        }
        views.push_back(view);
        memconcept::is(view, memconcept::concept::memorymappedfile, this->filename(), (unsigned) length);
        len = length;
        return view;
    }
Example #5
bool BlockRef::commit()
{
    std::lock_guard<boost::fibers::mutex> lk(chunk_->blocksMutex_);
    return commitUnlocked();
}
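
This is the usual split between a public locking wrapper and a private Unlocked core: commit() takes the mutex and delegates to commitUnlocked(), which already-locked code paths can call directly. A hypothetical self-contained sketch of the idiom (Counter is illustrative; the original guards chunk_->blocksMutex_, a Boost.Fiber mutex):

#include <mutex>

class Counter {
public:
    // Public entry point: takes the lock, then delegates to the unlocked core.
    int increment() {
        std::lock_guard<std::mutex> lk(mutex_);
        return incrementUnlocked();
    }

private:
    // Core logic; the caller must already hold mutex_. Other locked methods
    // can call this without re-acquiring (and deadlocking on) mutex_.
    int incrementUnlocked() { return ++count_; }

    std::mutex mutex_;
    int count_ = 0;
};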
Example #6
void SWRenderer::SetScreenshot(const char *_szFilename)
{
	std::lock_guard<std::mutex> lk(s_criticalScreenshot);
	s_sScreenshotName = _szFilename;
	s_bScreenshot = true;
}
Example #7
Status EphemeralForTestEngine::dropIdent(OperationContext* opCtx, StringData ident) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    _dataMap.erase(ident);
    return Status::OK();
}
Example #8
 inline underlying_queue_type underlying_queue() {
   lock_guard<mutex> lk(mtx_);
   return boost::move(data_);
 }
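
Here the guarded container is returned by move, so the lock is held only long enough to steal the contents, and the member is left empty but valid. A self-contained sketch of the same drain-by-move idiom using the standard library rather than Boost (WorkQueue and its members are illustrative names):

#include <deque>
#include <mutex>
#include <utility>

class WorkQueue {
public:
    void push(int v) {
        std::lock_guard<std::mutex> lk(mtx_);
        data_.push_back(v);
    }

    // Steal the whole queue under the lock; data_ is left empty but valid.
    std::deque<int> drain() {
        std::lock_guard<std::mutex> lk(mtx_);
        return std::move(data_);
    }

private:
    std::mutex mtx_;
    std::deque<int> data_;
};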
Example #9
StatusWith<std::string> ShardingCatalogManager::addShard(
    OperationContext* opCtx,
    const std::string* shardProposedName,
    const ConnectionString& shardConnectionString,
    const long long maxSize) {
    if (shardConnectionString.type() == ConnectionString::INVALID) {
        return {ErrorCodes::BadValue, "Invalid connection string"};
    }

    if (shardProposedName && shardProposedName->empty()) {
        return {ErrorCodes::BadValue, "shard name cannot be empty"};
    }

    // Only one addShard operation can be in progress at a time.
    Lock::ExclusiveLock lk(opCtx->lockState(), _kShardMembershipLock);

    // Check if this shard has already been added (can happen in the case of a retry after a network
    // error, for example) and thus this addShard request should be considered a no-op.
    auto existingShard =
        _checkIfShardExists(opCtx, shardConnectionString, shardProposedName, maxSize);
    if (!existingShard.isOK()) {
        return existingShard.getStatus();
    }
    if (existingShard.getValue()) {
        // These hosts already belong to an existing shard, so report success and terminate the
        // addShard request.  Make sure to set the last optime for the client to the system last
        // optime so that we'll still wait for replication so that this state is visible in the
        // committed snapshot.
        repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
        return existingShard.getValue()->getName();
    }

    // Force a reload of the ShardRegistry to ensure that, in case this addShard is to re-add a
    // replica set that has recently been removed, we have detached the ReplicaSetMonitor for the
    // set with that setName from the ReplicaSetMonitorManager and will create a new
    // ReplicaSetMonitor when targeting the set below.
    // Note: This is necessary because as of 3.4, removeShard is performed by mongos (unlike
    // addShard), so the ShardRegistry is not synchronously reloaded on the config server when a
    // shard is removed.
    if (!Grid::get(opCtx)->shardRegistry()->reload(opCtx)) {
        // If the first reload joined an existing one, call reload again to ensure the reload is
        // fresh.
        Grid::get(opCtx)->shardRegistry()->reload(opCtx);
    }

    // TODO: Don't create a detached Shard object, create a detached RemoteCommandTargeter instead.
    const std::shared_ptr<Shard> shard{
        Grid::get(opCtx)->shardRegistry()->createConnection(shardConnectionString)};
    invariant(shard);
    auto targeter = shard->getTargeter();

    auto stopMonitoringGuard = MakeGuard([&] {
        if (shardConnectionString.type() == ConnectionString::SET) {
            // This is a workaround for the case where we could have some bad shard being
            // requested to be added and we put that bad connection string on the global replica set
            // monitor registry. It needs to be cleaned up so that when a correct replica set is
            // added, it will be recreated.
            ReplicaSetMonitor::remove(shardConnectionString.getSetName());
        }
    });

    // Validate that the specified connection string can serve as a shard at all
    auto shardStatus =
        _validateHostAsShard(opCtx, targeter, shardProposedName, shardConnectionString);
    if (!shardStatus.isOK()) {
        return shardStatus.getStatus();
    }
    ShardType& shardType = shardStatus.getValue();

    // Check that none of the existing shard candidate's dbs exist already
    auto dbNamesStatus = _getDBNamesListFromShard(opCtx, targeter);
    if (!dbNamesStatus.isOK()) {
        return dbNamesStatus.getStatus();
    }

    for (const auto& dbName : dbNamesStatus.getValue()) {
        auto dbt = Grid::get(opCtx)->catalogClient()->getDatabase(
            opCtx, dbName, repl::ReadConcernLevel::kLocalReadConcern);
        if (dbt.isOK()) {
            const auto& dbDoc = dbt.getValue().value;
            return Status(ErrorCodes::OperationFailed,
                          str::stream() << "can't add shard "
                                        << "'"
                                        << shardConnectionString.toString()
                                        << "'"
                                        << " because a local database '"
                                        << dbName
                                        << "' exists in another "
                                        << dbDoc.getPrimary());
        } else if (dbt != ErrorCodes::NamespaceNotFound) {
            return dbt.getStatus();
        }
    }

    // Check that the shard candidate does not have a local config.system.sessions collection
    auto res = _dropSessionsCollection(opCtx, targeter);

    if (!res.isOK()) {
        return res.withContext(
            "can't add shard with a local copy of config.system.sessions, please drop this "
            "collection from the shard manually and try again.");
    }

    // If a name for a shard wasn't provided, generate one
    if (shardType.getName().empty()) {
        auto result = generateNewShardName(opCtx);
        if (!result.isOK()) {
            return result.getStatus();
        }
        shardType.setName(result.getValue());
    }

    if (maxSize > 0) {
        shardType.setMaxSizeMB(maxSize);
    }

    // Insert a shardIdentity document onto the shard. This also triggers sharding initialization on
    // the shard.
    LOG(2) << "going to insert shardIdentity document into shard: " << shardType;
    auto commandRequest = createShardIdentityUpsertForAddShard(opCtx, shardType.getName());
    auto swCommandResponse = _runCommandForAddShard(opCtx, targeter.get(), "admin", commandRequest);
    if (!swCommandResponse.isOK()) {
        return swCommandResponse.getStatus();
    }
    auto commandResponse = std::move(swCommandResponse.getValue());
    BatchedCommandResponse batchResponse;
    auto batchResponseStatus =
        Shard::CommandResponse::processBatchWriteResponse(commandResponse, &batchResponse);
    if (!batchResponseStatus.isOK()) {
        return batchResponseStatus;
    }

    // The featureCompatibilityVersion should be the same throughout the cluster. We don't
    // explicitly send writeConcern majority to the added shard, because a 3.4 mongod will reject
    // it (setFCV did not support writeConcern until 3.6), and a 3.6 mongod will still default to
    // majority writeConcern.
    //
    // TODO SERVER-32045: propagate the user's writeConcern
    auto versionResponse = _runCommandForAddShard(
        opCtx,
        targeter.get(),
        "admin",
        BSON(FeatureCompatibilityVersion::kCommandName << FeatureCompatibilityVersion::toString(
                 serverGlobalParams.featureCompatibility.getVersion())));
    if (!versionResponse.isOK()) {
        return versionResponse.getStatus();
    }

    if (!versionResponse.getValue().commandStatus.isOK()) {
        return versionResponse.getValue().commandStatus;
    }

    log() << "going to insert new entry for shard into config.shards: " << shardType.toString();

    Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
        opCtx,
        ShardType::ConfigNS,
        shardType.toBSON(),
        ShardingCatalogClient::kMajorityWriteConcern);
    if (!result.isOK()) {
        log() << "error adding shard: " << shardType.toBSON() << " err: " << result.reason();
        return result;
    }

    // Add all databases which were discovered on the new shard
    for (const auto& dbName : dbNamesStatus.getValue()) {
        DatabaseType dbt(dbName, shardType.getName(), false);
        Status status = Grid::get(opCtx)->catalogClient()->updateDatabase(opCtx, dbName, dbt);
        if (!status.isOK()) {
            log() << "adding shard " << shardConnectionString.toString()
                  << " even though could not add database " << dbName;
        }
    }

    // Record in changelog
    BSONObjBuilder shardDetails;
    shardDetails.append("name", shardType.getName());
    shardDetails.append("host", shardConnectionString.toString());

    Grid::get(opCtx)
        ->catalogClient()
        ->logChange(
            opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern)
        .transitional_ignore();

    // Ensure the added shard is visible to this process.
    auto shardRegistry = Grid::get(opCtx)->shardRegistry();
    if (!shardRegistry->getShard(opCtx, shardType.getName()).isOK()) {
        return {ErrorCodes::OperationFailed,
                "Could not find shard metadata for shard after adding it. This most likely "
                "indicates that the shard was removed immediately after it was added."};
    }
    stopMonitoringGuard.Dismiss();

    return shardType.getName();
}
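
addShard leans on MakeGuard/Dismiss above: the cleanup lambda runs on every early return, and only the success path cancels it. A minimal sketch of such a scope guard, assuming nothing about MongoDB's actual MakeGuard implementation (ScopeGuard and doWork are illustrative):

#include <functional>
#include <utility>

class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> f) : f_(std::move(f)) {}
    ~ScopeGuard() {
        if (f_) f_();  // runs on every exit path unless dismissed
    }
    void dismiss() { f_ = nullptr; }  // call on success to cancel the cleanup

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    std::function<void()> f_;
};

// Usage mirrors stopMonitoringGuard above: every early return triggers the
// cleanup; only the success path calls dismiss().
bool doWork() {
    ScopeGuard guard([] { /* undo side effects */ });
    if (/* validation fails */ false)
        return false;  // guard fires here
    guard.dismiss();
    return true;
}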
Example #10
File: instance.cpp Project: QiuYe/mongo
    bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop ) {
        bool ok = true;

        DbMessage d(m);

        const char *ns = d.getns();
        int ntoreturn = d.pullInt();
        long long cursorid = d.pullInt64();

        curop.debug().ns = ns;
        curop.debug().ntoreturn = ntoreturn;
        curop.debug().cursorid = cursorid;

        shared_ptr<AssertionException> ex;
        scoped_ptr<Timer> timer;
        int pass = 0;
        bool exhaust = false;
        QueryResult* msgdata = 0;
        OpTime last;
        while( 1 ) {
            try {
                const NamespaceString nsString( ns );
                uassert( 16258, str::stream() << "Invalid ns [" << ns << "]", nsString.isValid() );

                Status status = cc().getAuthorizationManager()->checkAuthForGetMore(ns);
                uassert(16543, status.reason(), status.isOK());

                if (str::startsWith(ns, "local.oplog.")){
                    if (pass == 0) {
                        mutex::scoped_lock lk(OpTime::m);
                        last = OpTime::getLast(lk);
                    }
                    else {
                        last.waitForDifferent(1000/*ms*/);
                    }
                }

                msgdata = processGetMore(ns, ntoreturn, cursorid, curop, pass, exhaust);
            }
            catch ( AssertionException& e ) {
                ex.reset( new AssertionException( e.getInfo().msg, e.getCode() ) );
                ok = false;
                break;
            }
            
            if (msgdata == 0) {
                // this should only happen with QueryOption_AwaitData
                exhaust = false;
                massert(13073, "shutting down", !inShutdown() );
                if ( ! timer ) {
                    timer.reset( new Timer() );
                }
                else {
                    if ( timer->seconds() >= 4 ) {
                        // after about 4 seconds, return. pass stops at 1000 normally.
                        // we want to return occasionally so slave can checkpoint.
                        pass = 10000;
                    }
                }
                pass++;
                if (debug)
                    sleepmillis(20);
                else
                    sleepmillis(2);
                
                // note: the 1100 is because of the waitForDifferent above;
                // should eventually clean this up a bit
                curop.setExpectedLatencyMs( 1100 + timer->millis() );
                
                continue;
            }
            break;
        }

        if (ex) {
            exhaust = false;

            BSONObjBuilder err;
            ex->getInfo().append( err );
            BSONObj errObj = err.done();

            log() << errObj << endl;

            curop.debug().exceptionInfo = ex->getInfo();

            if (ex->getCode() == 13436) {
                replyToQuery(ResultFlag_ErrSet, m, dbresponse, errObj);
                curop.debug().responseLength = dbresponse.response->header()->dataLen();
                curop.debug().nreturned = 1;
                return ok;
            }

            msgdata = emptyMoreResult(cursorid);
        }

        Message *resp = new Message();
        resp->setData(msgdata, true);
        curop.debug().responseLength = resp->header()->dataLen();
        curop.debug().nreturned = msgdata->nReturned;

        dbresponse.response = resp;
        dbresponse.responseTo = m.header()->id;
        
        if( exhaust ) {
            curop.debug().exhaust = true;
            dbresponse.exhaustNS = ns;
        }

        return ok;
    }
Example #11
boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
    OperationContext* opCtx,
    NamespaceString const& nss,
    OID const& epoch,
    int maxToDelete,
    CollectionRangeDeleter* forTestOnly) {

    StatusWith<int> wrote = 0;

    auto range = boost::optional<ChunkRange>(boost::none);
    auto notification = DeleteNotification();

    {
        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
        AutoGetCollection autoColl(opCtx, nss, MODE_IX);

        auto* const collection = autoColl.getCollection();
        auto* const css = CollectionShardingRuntime::get(opCtx, nss);
        auto& metadataManager = css->_metadataManager;
        auto* const self = forTestOnly ? forTestOnly : &metadataManager->_rangesToClean;

        const auto scopedCollectionMetadata =
            metadataManager->getActiveMetadata(metadataManager, boost::none);

        if (!scopedCollectionMetadata) {
            LOG(0) << "Abandoning any range deletions because the metadata for " << nss.ns()
                   << " was reset";
            stdx::lock_guard<stdx::mutex> lk(css->_metadataManager->_managerLock);
            css->_metadataManager->_clearAllCleanups(lk);
            return boost::none;
        }

        const auto& metadata = *scopedCollectionMetadata;

        if (!forTestOnly && (!collection || !metadata->isSharded())) {
            if (!collection) {
                LOG(0) << "Abandoning any range deletions left over from dropped " << nss.ns();
            } else {
                LOG(0) << "Abandoning any range deletions left over from previously sharded"
                       << nss.ns();
            }

            stdx::lock_guard<stdx::mutex> lk(css->_metadataManager->_managerLock);
            css->_metadataManager->_clearAllCleanups(lk);
            return boost::none;
        }

        if (!forTestOnly && metadata->getCollVersion().epoch() != epoch) {
            LOG(1) << "Range deletion task for " << nss.ns() << " epoch " << epoch << " woke;"
                   << " (current is " << metadata->getCollVersion() << ")";
            return boost::none;
        }

        bool writeOpLog = false;

        {
            stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager->_managerLock);
            if (self->isEmpty()) {
                LOG(1) << "No further range deletions scheduled on " << nss.ns();
                return boost::none;
            }

            auto& orphans = self->_orphans;
            if (orphans.empty()) {
                // We have delayed deletions; see if any are ready.
                auto& df = self->_delayedOrphans.front();
                if (df.whenToDelete > Date_t::now()) {
                    LOG(0) << "Deferring deletion of " << nss.ns() << " range "
                           << redact(df.range.toString()) << " until " << df.whenToDelete;
                    return df.whenToDelete;
                }

                // Move a single range from _delayedOrphans to _orphans
                orphans.splice(orphans.end(), self->_delayedOrphans, self->_delayedOrphans.begin());
                LOG(1) << "Proceeding with deferred deletion of " << nss.ns() << " range "
                       << redact(orphans.front().range.toString());

                writeOpLog = true;
            }

            invariant(!orphans.empty());
            const auto& frontRange = orphans.front().range;
            range.emplace(frontRange.getMin().getOwned(), frontRange.getMax().getOwned());
            notification = orphans.front().notification;
        }

        invariant(range);

        if (writeOpLog) {
            // Secondaries will watch for this update, and kill any queries that may depend on
            // documents in the range -- excepting any queries with a read-concern option
            // 'ignoreChunkMigration'
            try {
                AutoGetCollection autoAdmin(
                    opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IX);

                Helpers::upsert(opCtx,
                                NamespaceString::kServerConfigurationNamespace.ns(),
                                BSON("_id"
                                     << "startRangeDeletion"
                                     << "ns"
                                     << nss.ns()
                                     << "epoch"
                                     << epoch
                                     << "min"
                                     << range->getMin()
                                     << "max"
                                     << range->getMax()));
            } catch (const DBException& e) {
                stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager->_managerLock);
                css->_metadataManager->_clearAllCleanups(
                    scopedLock,
                    e.toStatus("cannot push startRangeDeletion record to Op Log,"
                               " abandoning scheduled range deletions"));
                return boost::none;
            }
        }

        try {
            wrote = self->_doDeletion(
                opCtx, collection, metadata->getKeyPattern(), *range, maxToDelete);
        } catch (const DBException& e) {
            wrote = e.toStatus();
            warning() << e.what();
        }
    }  // drop autoColl

    if (!wrote.isOK() || wrote.getValue() == 0) {
        if (wrote.isOK()) {
            LOG(0) << "No documents remain to delete in " << nss << " range "
                   << redact(range->toString());
        }

        // Wait for majority replication even when wrote isn't OK or == 0, because it might have
        // been OK and/or > 0 previously, and the deletions must be persistent before notifying
        // clients in _pop().

        LOG(0) << "Waiting for majority replication of local deletions in " << nss.ns() << " range "
               << redact(range->toString());

        repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
        const auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();

        // Wait for replication outside the lock
        const auto status = [&] {
            try {
                WriteConcernResult unusedWCResult;
                return waitForWriteConcern(
                    opCtx, clientOpTime, kMajorityWriteConcern, &unusedWCResult);
            } catch (const DBException& e) {
                return e.toStatus();
            }
        }();

        // Get the lock again to finish off this range (including notifying, if necessary).
        // Don't allow lock interrupts while cleaning up.
        UninterruptibleLockGuard noInterrupt(opCtx->lockState());
        AutoGetCollection autoColl(opCtx, nss, MODE_IX);
        auto* const css = CollectionShardingRuntime::get(opCtx, nss);
        auto& metadataManager = css->_metadataManager;
        auto* const self = forTestOnly ? forTestOnly : &metadataManager->_rangesToClean;

        stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager->_managerLock);

        if (!status.isOK()) {
            LOG(0) << "Error when waiting for write concern after removing " << nss << " range "
                   << redact(range->toString()) << " : " << redact(status.reason());

            // If range were already popped (e.g. by dropping nss during the waitForWriteConcern
            // above) its notification would have been triggered, so this check suffices to ensure
            // that it is safe to pop the range here
            if (!notification.ready()) {
                invariant(!self->isEmpty() && self->_orphans.front().notification == notification);
                LOG(0) << "Abandoning deletion of latest range in " << nss.ns() << " after local "
                       << "deletions because of replication failure";
                self->_pop(status);
            }
        } else {
            LOG(0) << "Finished deleting documents in " << nss.ns() << " range "
                   << redact(range->toString());

            self->_pop(wrote.getStatus());
        }

        if (!self->_orphans.empty()) {
            LOG(1) << "Deleting " << nss.ns() << " range "
                   << redact(self->_orphans.front().range.toString()) << " next.";
        }

        return Date_t::now() + stdx::chrono::milliseconds{rangeDeleterBatchDelayMS.load()};
    }

    invariant(range);
    invariant(wrote.getStatus());
    invariant(wrote.getValue() > 0);

    notification.abandon();
    return Date_t::now() + stdx::chrono::milliseconds{rangeDeleterBatchDelayMS.load()};
}
Example #12
File: instance.cpp Project: QiuYe/mongo
 void DiagLog::writeop(char *data,int len) {
     if ( level & 1 ) {
         scoped_lock lk(mutex);
         f->write(data,len);
     }
 }
Example #13
void FuseWrapper::RegisterFuseClient(std::string path_prefix,
                                     FuseClient* client) {
  std::unique_lock<std::mutex> lk(list_mutex);
  delegation_map[path_prefix] = client;
}
Example #14
Timestamp getLastSetTimestamp() {
    stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
    return globalTimestamp;
}
Example #15
 bool is_enabled() const {
     threading::scoped_lock lk(m_cs);
     return m_enabled;
 }
Example #16
void IndexBuildsCoordinatorMongod::signalChangeToSecondaryMode() {
    stdx::unique_lock<stdx::mutex> lk(_mutex);
    _replMode = ReplState::Secondary;
}
Example #17
 void set_enabled(bool enabled) {
     threading::scoped_lock lk(m_cs);
     m_enabled = enabled;
 }
Example #18
void IndexBuildsCoordinatorMongod::signalChangeToInitialSyncMode() {
    stdx::unique_lock<stdx::mutex> lk(_mutex);
    _replMode = ReplState::InitialSync;
}
Example #19
SortedDataInterface* EphemeralForTestEngine::getSortedDataInterface(OperationContext* opCtx,
                                                                    StringData ident,
                                                                    const IndexDescriptor* desc) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    return getEphemeralForTestBtreeImpl(Ordering::make(desc->keyPattern()), &_dataMap[ident]);
}
Example #20
StatusWith<SharedSemiFuture<ReplIndexBuildState::IndexCatalogStats>>
IndexBuildsCoordinatorMongod::startIndexBuild(OperationContext* opCtx,
                                              CollectionUUID collectionUUID,
                                              const std::vector<BSONObj>& specs,
                                              const UUID& buildUUID) {
    std::vector<std::string> indexNames;
    for (auto& spec : specs) {
        std::string name = spec.getStringField(IndexDescriptor::kIndexNameFieldName);
        if (name.empty()) {
            return Status(
                ErrorCodes::CannotCreateIndex,
                str::stream() << "Cannot create an index for a spec '" << spec
                              << "' without a non-empty string value for the 'name' field");
        }
        indexNames.push_back(name);
    }

    auto nss = UUIDCatalog::get(opCtx).lookupNSSByUUID(collectionUUID);
    auto dbName = nss.db().toString();
    auto replIndexBuildState =
        std::make_shared<ReplIndexBuildState>(buildUUID, collectionUUID, dbName, indexNames, specs);

    Status status = _registerIndexBuild(opCtx, replIndexBuildState);
    if (!status.isOK()) {
        return status;
    }

    // Run index build in-line if we are transitioning between replication modes.
    // While the RSTLExclusive is being held, the async thread in the thread pool is not allowed
    // to take locks.
    if (opCtx->lockState()->isRSTLExclusive()) {
        log() << "Running index build on current thread because we are transitioning between "
                 "replication states: "
              << buildUUID;
        // Sets up and runs the index build. Sets result and cleans up index build.
        _runIndexBuild(opCtx, buildUUID);
        return replIndexBuildState->sharedPromise.getFuture();
    }

    // Task in thread pool should retain the caller's deadline.
    auto deadline = opCtx->getDeadline();
    auto timeoutError = opCtx->getTimeoutError();

    // Task in thread pool should have similar CurOp representation to the caller so that it can be
    // identified as a createIndexes operation.
    BSONObj opDesc;
    {
        stdx::unique_lock<Client> lk(*opCtx->getClient());
        auto curOp = CurOp::get(opCtx);
        opDesc = curOp->opDescription().getOwned();
    }

    status = _threadPool.schedule([ this, buildUUID, deadline, timeoutError, opDesc ]() noexcept {
        auto opCtx = Client::getCurrent()->makeOperationContext();

        opCtx->setDeadlineByDate(deadline, timeoutError);

        {
            stdx::unique_lock<Client> lk(*opCtx->getClient());
            auto curOp = CurOp::get(opCtx.get());
            curOp->setOpDescription_inlock(opDesc);
        }

        // Sets up and runs the index build. Sets result and cleans up index build.
        _runIndexBuild(opCtx.get(), buildUUID);
    });

    // Clean up the index build if we failed to schedule it.
    if (!status.isOK()) {
        stdx::unique_lock<stdx::mutex> lk(_mutex);

        // Unregister the index build before setting the promises, so callers do not see the build
        // again.
        _unregisterIndexBuild(lk, opCtx, replIndexBuildState);

        // Set the promise in case another thread already joined the index build.
        replIndexBuildState->sharedPromise.setError(status);

        return status;
    }

    return replIndexBuildState->sharedPromise.getFuture();
}
Example #21
template <> void
AccountFactory::clear()
{
    std::lock_guard<std::recursive_mutex> lk(mutex_);
    accountMaps_.clear();
}
Example #22
// Worker thread
void HttpClient::networkThread()
{    
    HttpRequest *request = nullptr;
    
    auto scheduler = Director::getInstance()->getScheduler();
    
    while (true) 
    {
        if (s_need_quit)
        {
            break;
        }
        
        // step 1: send http request if the requestQueue isn't empty
        request = nullptr;
        
        s_requestQueueMutex.lock();
        
        //Get request task from queue
        
        if (!s_requestQueue->empty())
        {
            request = s_requestQueue->at(0);
            s_requestQueue->erase(0);
        }
        
        s_requestQueueMutex.unlock();
        
        if (nullptr == request)
        {
            // Wait for http request tasks from main thread
            std::unique_lock<std::mutex> lk(s_SleepMutex); 
            s_SleepCondition.wait(lk);
            continue;
        }
        
        // step 2: libcurl sync access
        
        // Create a HttpResponse object, the default setting is http access failed
        HttpResponse *response = new HttpResponse(request);
        
        // request's refcount = 2 here; it's retained by the HttpResponse constructor
        request->release();
        // ok, refcount = 1 now, only HttpResponse hold it.
        
        long responseCode = -1;
        int retValue = 0;

        // Process the request -> get response packet
        switch (request->getRequestType())
        {
            case HttpRequest::Type::GET: // HTTP GET
                retValue = processGetTask(request,
                                          writeData, 
                                          response->getResponseData(), 
                                          &responseCode,
                                          writeHeaderData,
                                          response->getResponseHeader());
                break;
            
            case HttpRequest::Type::POST: // HTTP POST
                retValue = processPostTask(request,
                                           writeData, 
                                           response->getResponseData(), 
                                           &responseCode,
                                           writeHeaderData,
                                           response->getResponseHeader());
                break;

            case HttpRequest::Type::PUT:
                retValue = processPutTask(request,
                                          writeData,
                                          response->getResponseData(),
                                          &responseCode,
                                          writeHeaderData,
                                          response->getResponseHeader());
                break;

            case HttpRequest::Type::DELETE:
                retValue = processDeleteTask(request,
                                             writeData,
                                             response->getResponseData(),
                                             &responseCode,
                                             writeHeaderData,
                                             response->getResponseHeader());
                break;
            
            default:
                CCASSERT(false, "CCHttpClient: unknown request type, only GET, POST, PUT and DELETE are supported");
                break;
        }
                
        // write data to HttpResponse
        response->setResponseCode(responseCode);
        
        if (retValue != 0) 
        {
            response->setSucceed(false);
            response->setErrorBuffer(s_errorBuffer);
        }
        else
        {
            response->setSucceed(true);
        }

        
        // add response packet into queue
        s_responseQueueMutex.lock();
        s_responseQueue->pushBack(response);
        s_responseQueueMutex.unlock();
        
        if (nullptr != s_pHttpClient) {
            scheduler->performFunctionInCocosThread(CC_CALLBACK_0(HttpClient::dispatchResponseCallbacks, this));
        }
    }
    
    // cleanup: if the worker thread received the quit signal, clean up the unfinished request queue
    s_requestQueueMutex.lock();
    s_requestQueue->clear();
    s_requestQueueMutex.unlock();
    
    
    if (s_requestQueue != nullptr) {
        delete s_requestQueue;
        s_requestQueue = nullptr;
        delete s_responseQueue;
        s_responseQueue = nullptr;
    }
    
}
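
The wait above (s_SleepCondition.wait(lk)) has no predicate, so it is exposed to spurious wakeups and to notifications that arrive before the worker reaches the wait. A sketch of the predicate form that closes both gaps; the names are illustrative, not part of the cocos2d-x API:

#include <condition_variable>
#include <deque>
#include <mutex>

std::mutex s_queueMutex;
std::condition_variable s_queueCondition;
std::deque<int> s_queue;   // stand-in for the request queue
bool s_needQuit = false;

// Returns the next item, or -1 once shutdown is requested.
int waitForWork() {
    std::unique_lock<std::mutex> lk(s_queueMutex);
    // The predicate is rechecked under the lock on every wakeup, so a
    // spurious wakeup or an early notify cannot produce a missed or bogus item.
    s_queueCondition.wait(lk, [] { return s_needQuit || !s_queue.empty(); });
    if (s_needQuit)
        return -1;
    int item = s_queue.front();
    s_queue.pop_front();
    return item;
}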
Example #23
BlockRef::BlockRef(std::shared_ptr<Chunk> chunk, Chunk::BlockPos pos)
    : chunk_(std::move(chunk)), pos_(pos)
{
    std::lock_guard<boost::fibers::mutex> lk(chunk_->blocksMutex_);
    original_ = chunk_->getBlockUnlocked(pos_);
}
Example #24
MigrationSecondaryThrottleOptions BalancerConfiguration::getSecondaryThrottle() const {
    stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
    return _balancerSettings.getSecondaryThrottle();
}
Example #25
static void CompressAndDumpState(CompressAndDumpState_args save_args)
{
	std::lock_guard<std::mutex> lk(*save_args.buffer_mutex);
	if (!save_args.wait)
		g_compressAndDumpStateSyncEvent.Set();

	const u8* const buffer_data = &(*(save_args.buffer_vector))[0];
	const size_t buffer_size = (save_args.buffer_vector)->size();
	std::string& filename = save_args.filename;

	// For easy debugging
	Common::SetCurrentThreadName("SaveState thread");

	// Move the existing save to the lastState slot so the overwrite can be undone
	if (File::Exists(filename))
	{
		if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav"))
			File::Delete((File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav"));
		if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav.dtm"))
			File::Delete((File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav.dtm"));

		if (!File::Rename(filename, File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav"))
			Core::DisplayMessage("Failed to move previous state to state undo backup", 1000);
		else
			File::Rename(filename + ".dtm", File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav.dtm");
	}

	if ((Movie::IsMovieActive()) && !Movie::IsJustStartingRecordingInputFromSaveState())
		Movie::SaveRecording(filename + ".dtm");
	else if (!Movie::IsMovieActive())
		File::Delete(filename + ".dtm");

	File::IOFile f(filename, "wb");
	if (!f)
	{
		Core::DisplayMessage("Could not save state", 2000);
		g_compressAndDumpStateSyncEvent.Set();
		return;
	}

	// Setting up the header
	StateHeader header;
	memcpy(header.gameID, SConfig::GetInstance().m_LocalCoreStartupParameter.GetUniqueID().c_str(), 6);
	header.size = g_use_compression ? (u32)buffer_size : 0;
	header.time = Common::Timer::GetDoubleTime();

	f.WriteArray(&header, 1);

	if (header.size != 0) // non-zero header size means the state is compressed
	{
		lzo_uint i = 0;
		while (true)
		{
			lzo_uint32 cur_len = 0;
			lzo_uint out_len = 0;

			if ((i + IN_LEN) >= buffer_size)
			{
				cur_len = (lzo_uint32)(buffer_size - i);
			}
			else
			{
				cur_len = IN_LEN;
			}

			if (lzo1x_1_compress(buffer_data + i, cur_len, out, &out_len, wrkmem) != LZO_E_OK)
				PanicAlertT("Internal LZO Error - compression failed");

			// The size of the data to write is 'out_len'
			f.WriteArray((lzo_uint32*)&out_len, 1);
			f.WriteBytes(out, out_len);

			if (cur_len != IN_LEN)
				break;

			i += cur_len;
		}
	}
	else // uncompressed
	{
		f.WriteBytes(buffer_data, buffer_size);
	}

	Core::DisplayMessage(StringFromFormat("Saved State to %s", filename.c_str()), 2000);
	g_compressAndDumpStateSyncEvent.Set();
}
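
The loop above frames the stream as (compressed length, compressed bytes) records, and the end of the stream is marked implicitly by an input chunk shorter than IN_LEN. A sketch of that framing with a placeholder compressor (compressChunk and kChunkSize are hypothetical stand-ins for lzo1x_1_compress and IN_LEN):

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for lzo1x_1_compress: "compresses" by copying.
static std::vector<uint8_t> compressChunk(const uint8_t* src, size_t len) {
    return std::vector<uint8_t>(src, src + len);
}

static const size_t kChunkSize = 1024 * 1024;

// Write (length, bytes) records; a chunk shorter than kChunkSize marks the
// end, mirroring the cur_len != IN_LEN break in the loop above.
void writeFramed(std::FILE* f, const uint8_t* data, size_t size) {
    size_t i = 0;
    while (true) {
        size_t curLen = (i + kChunkSize >= size) ? (size - i) : kChunkSize;
        std::vector<uint8_t> out = compressChunk(data + i, curLen);
        uint32_t outLen = static_cast<uint32_t>(out.size());
        std::fwrite(&outLen, sizeof(outLen), 1, f);
        std::fwrite(out.data(), 1, out.size(), f);
        if (curLen != kChunkSize)
            break;  // short (or empty) final chunk: done
        i += curLen;
    }
}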
Example #26
bool BalancerConfiguration::waitForDelete() const {
    stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
    return _balancerSettings.waitForDelete();
}
Example #27
void ActiveMigrationsRegistry::_clearReceiveChunk() {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    invariant(_activeReceiveChunkState);
    _activeReceiveChunkState.reset();
}
Example #28
BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() const {
    stdx::lock_guard<stdx::mutex> lk(_balancerSettingsMutex);
    return _balancerSettings.getMode();
}
Example #29
File: config.cpp Project: gilles/mongo
 bool DBConfig::load(){
     scoped_lock lk( _lock );
     return _load();
 }
Example #30
    bool replset::InitialSync::oplogApplication(OplogReader& r, const Member* source,
        const OpTime& applyGTE, const OpTime& minValid) {

        const string hn = source->fullName();
        try {
            r.tailingQueryGTE( rsoplog, applyGTE );
            if ( !r.haveCursor() ) {
                log() << "replSet initial sync oplog query error" << rsLog;
                return false;
            }

            {
                if( !r.more() ) {
                    sethbmsg("replSet initial sync error reading remote oplog");
                    log() << "replSet initial sync error remote oplog (" << rsoplog << ") on host " << hn << " is empty?" << rsLog;
                    return false;
                }
                bo op = r.next();
                OpTime t = op["ts"]._opTime();
                r.putBack(op);

                if( op.firstElementFieldName() == string("$err") ) {
                    log() << "replSet initial sync error querying " << rsoplog << " on " << hn << " : " << op.toString() << rsLog;
                    return false;
                }

                uassert( 13508 , str::stream() << "no 'ts' in first op in oplog: " << op , !t.isNull() );
                if( t > applyGTE ) {
                    sethbmsg(str::stream() << "error " << hn << " oplog wrapped during initial sync");
                    log() << "replSet initial sync expected first optime of " << applyGTE << rsLog;
                    log() << "replSet initial sync but received a first optime of " << t << " from " << hn << rsLog;
                    return false;
                }

                sethbmsg(str::stream() << "initial oplog application from " << hn << " starting at "
                         << t.toStringPretty() << " to " << minValid.toStringPretty());
            }
        }
        catch(DBException& e) {
            log() << "replSet initial sync failing: " << e.toString() << rsLog;
            return false;
        }

        /* we lock outside the loop to avoid the overhead of locking on every operation. */
        writelock lk("");

        // todo : use exhaust
        OpTime ts;
        time_t start = time(0);
        unsigned long long n = 0;
        int fails = 0;
        while( ts < minValid ) {
            try {
                // There are some special cases with initial sync (see the catch block), so we
                // don't want to break out of this while until we've reached minvalid. Thus, we'll
                // keep trying to requery.
                if( !r.more() ) {
                    OCCASIONALLY log() << "replSet initial sync oplog: no more records" << endl;
                    sleepsecs(1);

                    r.resetCursor();
                    r.tailingQueryGTE(rsoplog, theReplSet->lastOpTimeWritten);
                    if ( !r.haveCursor() ) {
                        if (fails++ > 30) {
                            log() << "replSet initial sync tried to query oplog 30 times, giving up" << endl;
                            return false;
                        }
                    }

                    continue;
                }

                BSONObj o = r.nextSafe(); /* note we might get "not master" at some point */
                ts = o["ts"]._opTime();

                {
                    if( (source->state() != MemberState::RS_PRIMARY &&
                            source->state() != MemberState::RS_SECONDARY) ||
                            replSetForceInitialSyncFailure ) {

                        int f = replSetForceInitialSyncFailure;
                        if( f > 0 ) {
                            replSetForceInitialSyncFailure = f-1;
                            log() << "replSet test code invoked, replSetForceInitialSyncFailure" << rsLog;
                            throw DBException("forced error",0);
                        }
                        log() << "replSet we are now primary" << rsLog;
                        throw DBException("primary changed",0);
                    }

                    applyOp(o, applyGTE);
                }

                if ( ++n % 1000 == 0 ) {
                    time_t now = time(0);
                    if (now - start > 10) {
                        // simple progress metering
                        log() << "replSet initialSyncOplogApplication applied " << n << " operations, synced to "
                              << ts.toStringPretty() << rsLog;
                        start = now;
                    }
                }

                getDur().commitIfNeeded();
            }
            catch (DBException& e) {
                // Skip duplicate key exceptions.
                // These are relatively common on initial sync: if a document is inserted
                // early in the clone step, the insert will be replayed but the document
                // will probably already have been cloned over.
                if( e.getCode() == 11000 || e.getCode() == 11001 || e.getCode() == 12582) {
                    continue;
                }
                
                // handle cursor not found (just requery)
                if( e.getCode() == 13127 ) {
                    log() << "replSet requerying oplog after cursor not found condition, ts: " << ts.toStringPretty() << endl;
                    r.resetCursor();
                    r.tailingQueryGTE(rsoplog, ts);
                    if( r.haveCursor() ) {
                        continue;
                    }
                }

                // TODO: handle server restart

                if( ts <= minValid ) {
                    // didn't make it far enough
                    log() << "replSet initial sync failing, error applying oplog : " << e.toString() << rsLog;
                    return false;
                }

                // otherwise, whatever, we'll break out of the loop and catch
                // anything that's really wrong in syncTail
            }
        }
        return true;
    }