Example #1
void
ThreadPoolWorker::run()
{
    // This is hokey in the extreme.  To compute the stack limit,
    // subtract the size of the stack from the address of a local
    // variable and give a 2k buffer.  Is there a better way?
    uintptr_t stackLimitOffset = WORKER_THREAD_STACK_SIZE - 2*1024;
    uintptr_t stackLimit = (((uintptr_t)&stackLimitOffset) +
                             stackLimitOffset * JS_STACK_GROWTH_DIRECTION);

    AutoLockMonitor lock(*this);
    JS_ASSERT(state_ == ACTIVE);
    for (;;) {
        while (!worklist_.empty()) {
            TaskExecutor *task = worklist_.popCopy();
            {
                AutoUnlockMonitor unlock(*this);
                task->executeFromWorker(workerId_, stackLimit);
            }
        }

        if (state_ == TERMINATING)
            break;

        lock.wait();
    }

    JS_ASSERT(worklist_.empty());
    state_ = TERMINATED;
    lock.notify();
}
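
The stack-limit arithmetic above is easier to see with concrete numbers. Below is a minimal standalone sketch, assuming a 512 KB worker stack and JS_STACK_GROWTH_DIRECTION == -1 (a downward-growing stack, as on x86); the constant values are stand-ins, not SpiderMonkey's actual configuration. The limit lands 2 KB above the point where the stack would overflow.

#include <cstdint>
#include <cstdio>

// Stand-ins for the SpiderMonkey constants used in the example.
static const uintptr_t WORKER_THREAD_STACK_SIZE = 512 * 1024;
static const intptr_t JS_STACK_GROWTH_DIRECTION = -1; // stack grows downward

int main() {
    // The address of a local approximates the top of the live stack frame.
    uintptr_t stackLimitOffset = WORKER_THREAD_STACK_SIZE - 2 * 1024;
    uintptr_t stackLimit = (uintptr_t)&stackLimitOffset +
                           stackLimitOffset * JS_STACK_GROWTH_DIRECTION;

    // With direction -1, multiplying by the offset subtracts it, so the
    // limit sits (512 KB - 2 KB) below the current frame: a 2 KB safety
    // margin before the thread actually runs out of stack.
    printf("local variable: %p\nstack limit:    %p\n",
           (void*)&stackLimitOffset, (void*)stackLimit);
    return 0;
}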
Example #2
    // must not be invoked by more than one thread at a time
    void barrier(TaskExecutor<Options> &te) {
        tm.start_executing();

        {
            TaskQueueUnsafe woken;
            while (te.execute_tasks(woken));
        }

        const size_t num_workers(tm.get_num_cpus()-1);

        if (num_workers == 0)
            return;

        for (;;) {
            {
                TaskQueueUnsafe woken;
                while (te.execute_tasks(woken));
            }

            barrier_counter = num_workers;
            abort = 0;
            Atomic::memory_fence_producer();
            state = 1;

            for (;;) {
                const int local_state(state);
                if (local_state == 0) {
                    Atomic::memory_fence_consumer();
                    const int local_abort(abort);
                    if (local_abort == 1)
                        break;
                    if (!te.get_task_queue().empty_safe())
                        break;
                    return;
                }

                const int local_abort(abort);
                if (local_abort == 1 || !te.get_task_queue().empty_safe()) {
                    while (state != 0) {
                        {
                            TaskQueueUnsafe woken;
                            while (te.execute_tasks(woken));
                        }

                        Atomic::compiler_fence();
                    }
                    break;
                }
                Atomic::rep_nop();
            }
        }
    }
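
The memory_fence_producer/memory_fence_consumer pair above implements a publish/observe protocol: the publisher writes its payload (barrier_counter, abort), issues a release fence, then writes the flag (state); the observer reads the flag, issues an acquire fence, then reads the payload. A minimal standalone sketch of the same idiom using std::atomic fences (the names are illustrative, not this library's API):

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> state{0};
int payload = 0; // ordinary (non-atomic) data published via the fences

void producer() {
    payload = 42;                                        // write payload first
    std::atomic_thread_fence(std::memory_order_release); // memory_fence_producer
    state.store(1, std::memory_order_relaxed);           // then publish the flag
}

void consumer() {
    while (state.load(std::memory_order_relaxed) != 1)   // spin on the flag
        ;
    std::atomic_thread_fence(std::memory_order_acquire); // memory_fence_consumer
    assert(payload == 42);                               // payload is now visible
}

int main() {
    std::thread t(consumer);
    producer();
    t.join();
    return 0;
}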
Example #3
StatusWith<Shard::CommandResponse> ShardRemote::_runCommand(OperationContext* txn,
                                                            const ReadPreferenceSetting& readPref,
                                                            const string& dbName,
                                                            const BSONObj& cmdObj) {
    const BSONObj cmdWithMaxTimeMS = (isConfig() ? appendMaxTimeToCmdObj(txn, cmdObj) : cmdObj);

    const auto host =
        _targeter->findHost(readPref, RemoteCommandTargeter::selectFindHostMaxWaitTime(txn));
    if (!host.isOK()) {
        return host.getStatus();
    }

    RemoteCommandRequest request(host.getValue(),
                                 dbName,
                                 cmdWithMaxTimeMS,
                                 _getMetadataForCommand(readPref),
                                 isConfig() ? kConfigCommandTimeout
                                            : executor::RemoteCommandRequest::kNoTimeout);
    StatusWith<RemoteCommandResponse> swResponse =
        Status(ErrorCodes::InternalError, "Internal error running command");

    TaskExecutor* executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();
    auto callStatus = executor->scheduleRemoteCommand(
        request,
        [&swResponse](const RemoteCommandCallbackArgs& args) { swResponse = args.response; });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }

    // Block until the command is carried out
    executor->wait(callStatus.getValue());

    updateReplSetMonitor(host.getValue(), swResponse.getStatus());

    if (!swResponse.isOK()) {
        if (swResponse.getStatus().compareCode(ErrorCodes::ExceededTimeLimit)) {
            LOG(0) << "Operation timed out with status " << swResponse.getStatus();
        }
        return swResponse.getStatus();
    }

    BSONObj responseObj = swResponse.getValue().data.getOwned();
    BSONObj responseMetadata = swResponse.getValue().metadata.getOwned();
    Status commandStatus = getStatusFromCommandResult(responseObj);
    Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);
    updateReplSetMonitor(host.getValue(), commandStatus);

    return CommandResponse(std::move(responseObj),
                           std::move(responseMetadata),
                           std::move(commandStatus),
                           std::move(writeConcernStatus));
}
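
The schedule-then-wait pattern here is what makes the asynchronous executor usable synchronously: the callback writes through a reference into a stack local (swResponse), which is safe only because executor->wait() keeps the frame alive until the callback has run. A minimal sketch of the same shape using std::promise in place of MongoDB's TaskExecutor (the scheduling thread is a stand-in, not the real API):

#include <future>
#include <iostream>
#include <string>
#include <thread>

// Bridge a callback-style asynchronous API into a blocking call, the way
// _runCommand pairs scheduleRemoteCommand() with executor->wait().
std::string runCommandBlocking() {
    std::promise<std::string> resultPromise;
    std::future<std::string> resultFuture = resultPromise.get_future();

    // "Schedule" the command on another thread; the callback fills in the
    // result, like the lambda that assigns swResponse above.
    std::thread worker([&resultPromise] {
        resultPromise.set_value("{ ok: 1 }");
    });

    // Block until the command is carried out -- the analogue of
    // executor->wait(callStatus.getValue()); only now is it safe to let
    // the locals referenced by the callback go out of scope.
    std::string response = resultFuture.get();
    worker.join();
    return response;
}

int main() {
    std::cout << runCommandBlocking() << std::endl;
    return 0;
}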
Example #4
void test_destroy_future_packaged_task() {
    for (int i = 0; i < 10; ++i) {
        auto task = std::packaged_task<void()>([i]() {
            std::cout << std::this_thread::get_id() << " Long text for testing, check if everything is aligned. Test " << i << std::endl;
        });
        auto task_future = task.get_future();
        _executor.add_task(std::move(task));
        // task_future is destroyed at the end of this iteration, often
        // before the executor has actually run the task.
    }
}
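
The point of the test: task_future dies at the end of every iteration, usually before the executor's thread has run the task. That is legal for std::packaged_task; dropping the future only gives up the ability to observe the result, as the standalone sketch below shows (no executor is needed to demonstrate it):

#include <future>
#include <iostream>

int main() {
    bool ran = false;
    std::packaged_task<void()> task([&ran] { ran = true; });
    {
        std::future<void> f = task.get_future();
    } // the future is destroyed here, before the task has run

    task(); // the task still executes normally afterwards
    std::cout << std::boolalpha << ran << std::endl; // prints: true
    return 0;
}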
Example #5
File: Driver.cpp Project: anandrathi/MyCDR
int Driver::Run()
{
	ACE_TRACE("Driver::Run");
	PATH_RECORD_MAP::iterator itor = _path_record.begin();
	for (; itor != _path_record.end(); ++itor)
	{
		TaskExecutor * te = new TaskExecutor;
		te->path = itor->first;
		PathRecordPair &t = itor->second;
		te->_RecContMap = t._RecContMap;
		te->InitScript();
		te->open();
		itor->second._TaskExecutor = te;
	}

	ACE_Thread_Manager::instance()->wait();
	return 0;
}
Example #6
    void init_worker(int id) {
        Options::ThreadAffinity::pin_workerthread(id);
        // allocate Worker on thread
        TaskExecutor<Options> *te = new TaskExecutor<Options>(id, *this);

        // wait until main thread has initialized the threadmanager
        startup_lock.lock();
        startup_lock.unlock();

        threads[id] = te;
        task_queues[id] = &te->get_task_queue();

        Atomic::increase(&start_counter);

        // block until the main thread releases lock_workers_initialized,
        // i.e. until every worker has registered itself
        lock_workers_initialized.lock();
        lock_workers_initialized.unlock();

        te->work_loop();
    }
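
The bare lock()/unlock() pairs above are rendezvous points, not critical sections: the main thread holds startup_lock while it initializes, so each worker's lock() blocks until initialization is finished, and its unlock() immediately lets the next worker through. A standalone model of the idiom (the names mirror the snippet, but this is not the library's code):

#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

std::mutex startup_lock;
int shared_config = 0;

void init_worker(int id) {
    startup_lock.lock();   // blocks until the main thread opens the gate
    startup_lock.unlock(); // we only needed the rendezvous, not the lock
    std::cout << "worker " << id << " sees config " << shared_config << "\n";
}

int main() {
    startup_lock.lock();   // close the gate before spawning workers
    std::vector<std::thread> workers;
    for (int id = 0; id < 4; ++id)
        workers.emplace_back(init_worker, id);

    shared_config = 42;    // initialize while the workers wait
    startup_lock.unlock(); // open the gate: workers proceed one by one

    for (std::thread& t : workers)
        t.join();
    return 0;
}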
Example #7
TaskQueue::~TaskQueue()
{
	{
		std::unique_lock<std::mutex> l(mUnprocessedQueueMutex);
		mActive = false;
	}
	mUnprocessedQueueCond.notify_all();
	//Join all executors. Since the queue is shutting down they will all exit their main loop if there are no more tasks to process.
	for (TaskExecutorStore::iterator I = mExecutors.begin(); I != mExecutors.end(); ++I) {
		TaskExecutor* executor = *I;
		executor->join();
		delete executor;
	}

	//Finally we must process all of the tasks in our main loop. This of course requires that this instance is destroyed from the main loop.
	pollProcessedTasks(TimeFrame(boost::posix_time::seconds(60)));
	assert(mProcessedTaskUnits.empty());
	assert(mUnprocessedTaskUnits.empty());
}
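
This shutdown only works if the executors' main loop re-checks mActive under the same mutex and condition variable, so that notify_all() after flipping the flag wakes every worker even when no task ever arrives, and each worker drains what is left before returning. A guess at the shape of that worker loop (not the project's actual code), as a standalone sketch:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

struct QueueState {
    std::mutex mUnprocessedQueueMutex;
    std::condition_variable mUnprocessedQueueCond;
    std::deque<std::function<void()>> mUnprocessedTasks;
    bool mActive = true;
};

void workerLoop(QueueState& q) {
    for (;;) {
        std::function<void()> task;
        {
            std::unique_lock<std::mutex> l(q.mUnprocessedQueueMutex);
            // The predicate must also check mActive, or the worker would
            // sleep through the shutdown broadcast.
            q.mUnprocessedQueueCond.wait(l, [&] {
                return !q.mUnprocessedTasks.empty() || !q.mActive;
            });
            if (q.mUnprocessedTasks.empty())
                return; // shutting down and nothing left to process
            task = std::move(q.mUnprocessedTasks.front());
            q.mUnprocessedTasks.pop_front();
        }
        task(); // run outside the lock so producers are not blocked
    }
}

int main() {
    QueueState q;
    std::thread worker(workerLoop, std::ref(q));
    {
        std::unique_lock<std::mutex> l(q.mUnprocessedQueueMutex);
        q.mActive = false;
    }
    q.mUnprocessedQueueCond.notify_all();
    worker.join(); // mirrors executor->join() in the destructor
    return 0;
}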
Example #8
void TaskQueue::deactivate()
{
    if (mActive) {
        {
            std::unique_lock < std::mutex > l(mUnprocessedQueueMutex);
            mActive = false;
        }
        mUnprocessedQueueCond.notify_all();
        //Join all executors. Since the queue is shutting down they will all exit their main loop if there are no more tasks to process.
        for (TaskExecutorStore::iterator I = mExecutors.begin(); I != mExecutors.end(); ++I) {
            TaskExecutor* executor = *I;
            executor->join();
            delete executor;
        }

        //Finally we must process all of the tasks in our main loop. This of course requires that this instance is destroyed from the main loop.
        mEventService.processAllHandlers();

        assert(mProcessedTaskUnits->empty());
        assert(mUnprocessedTaskUnits.empty());
    }
}
Example #9
void
ThreadPoolWorker::run()
{
    // This is hokey in the extreme.  To compute the stack limit,
    // subtract the size of the stack from the address of a local
    // variable and give a 10k buffer.  Is there a better way?
    // (Note: 2k proved to be fine on Mac, but too little on Linux)
    uintptr_t stackLimitOffset = WORKER_THREAD_STACK_SIZE - 10*1024;
    uintptr_t stackLimit = (((uintptr_t)&stackLimitOffset) +
                             stackLimitOffset * JS_STACK_GROWTH_DIRECTION);

    AutoLockMonitor lock(*this);

    for (;;) {
        while (!worklist_.empty()) {
            TaskExecutor *task = worklist_.popCopy();
            {
                // Unlock so that new things can be added to the
                // worklist while we are processing the current item:
                AutoUnlockMonitor unlock(*this);
                task->executeFromWorker(workerId_, stackLimit);
            }
        }

        if (state_ == TERMINATING)
            break;

        JS_ASSERT(state_ == ACTIVE);

        lock.wait();
    }

    JS_ASSERT(worklist_.empty() && state_ == TERMINATING);
    state_ = TERMINATED;
    lock.notify();
}
Example #10
Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
                                                 const ReadPreferenceSetting& readPref,
                                                 const string& dbName,
                                                 Milliseconds maxTimeMSOverride,
                                                 const BSONObj& cmdObj) {

    ReadPreferenceSetting readPrefWithMinOpTime(readPref);
    if (getId() == "config") {
        readPrefWithMinOpTime.minOpTime = grid.configOpTime();
    }
    const auto host = _targeter->findHost(txn, readPrefWithMinOpTime);
    if (!host.isOK()) {
        return Shard::HostWithResponse(boost::none, host.getStatus());
    }

    const Milliseconds requestTimeout =
        std::min(txn->getRemainingMaxTimeMillis(), maxTimeMSOverride);

    const RemoteCommandRequest request(
        host.getValue(),
        dbName,
        appendMaxTimeToCmdObj(maxTimeMSOverride, cmdObj),
        _appendMetadataForCommand(txn, readPrefWithMinOpTime),
        txn,
        requestTimeout < Milliseconds::max() ? requestTimeout : RemoteCommandRequest::kNoTimeout);

    RemoteCommandResponse swResponse =
        Status(ErrorCodes::InternalError, "Internal error running command");

    TaskExecutor* executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();
    auto callStatus = executor->scheduleRemoteCommand(
        request,
        [&swResponse](const RemoteCommandCallbackArgs& args) { swResponse = args.response; });
    if (!callStatus.isOK()) {
        return Shard::HostWithResponse(host.getValue(), callStatus.getStatus());
    }

    // Block until the command is carried out
    executor->wait(callStatus.getValue());

    updateReplSetMonitor(host.getValue(), swResponse.status);

    if (!swResponse.isOK()) {
        if (swResponse.status.compareCode(ErrorCodes::ExceededTimeLimit)) {
            LOG(0) << "Operation timed out with status " << redact(swResponse.status);
        }
        return Shard::HostWithResponse(host.getValue(), swResponse.status);
    }

    BSONObj responseObj = swResponse.data.getOwned();
    BSONObj responseMetadata = swResponse.metadata.getOwned();
    Status commandStatus = getStatusFromCommandResult(responseObj);
    Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);

    // Tell the replica set monitor of any errors
    updateReplSetMonitor(host.getValue(), commandStatus);
    updateReplSetMonitor(host.getValue(), writeConcernStatus);

    return Shard::HostWithResponse(host.getValue(),
                                   CommandResponse(std::move(responseObj),
                                                   std::move(responseMetadata),
                                                   std::move(commandStatus),
                                                   std::move(writeConcernStatus)));
}
Example #11
    // Called from TaskExecutor: Wait at barrier if requested.
    void wait_at_barrier(TaskExecutor<Options> &te) {
        Atomic::compiler_fence();
        const int local_state(state);
        if (local_state == 0)
            return;

        if (te.my_barrier_state == local_state)
            return;

        te.my_barrier_state = local_state;

        // new state

        if (local_state == 1) {
            // enter barrier
            if (Atomic::decrease_nv(&barrier_counter) != 0) {
                // wait for next state
                for (;;) {
                    const int local_abort(abort);
                    if (local_abort == 1) {
                        // aborted from elsewhere -- skip spinloop and start workloop
                        return;
                    }

                    if (!te.get_task_queue().empty()) {
                        abort = 1;
                        Atomic::memory_fence_producer();
                        // we have to abort -- indicate others and start workloop
                        return; // work loop, state == 1
                    }

                    const int new_local_state(state);
                    if (new_local_state == 2) {
                        // completed phase 1 without seeing abort. continue to phase 2
                        break;
                    }
                    Atomic::yield();
                    Atomic::compiler_fence();
                }
                if (!te.get_task_queue().empty_safe()) {
                    abort = 1;
                    Atomic::memory_fence_producer();
                    // we have to abort -- indicate others and start workloop
                    return; // work loop, state == 1
                }
                // phase 1 completed without abort.
                state = 2;

                te.my_barrier_state = 2;
                if (Atomic::decrease_nv(&barrier_counter) == 0) {
                    // last into phase 2

                    barrier_counter = tm.get_num_cpus()-2;
                    te.my_barrier_state = 0;
                    const int local_abort(abort);
                    if (local_abort != 1) {
                        if (!te.get_task_queue().empty_safe()) {
                            // we have to abort -- start next phase and exit to work-loop
                            abort = 1;
                            Atomic::memory_fence_producer();
                        }
                    }
                    // leave barrier
                    state = 0;
                    return;
                }
                // phase 2, but not last. fall-through
            }
            else {
                // last in for state 1 -- start phase 2
                const size_t num_workers = tm.get_num_cpus()-1;
                if (num_workers == 1) {
                    // if there is a single worker we can't wait for anybody else to finish the barrier.
                    if (!te.get_task_queue().empty_safe()) {
                        abort = 1;
                        Atomic::memory_fence_producer();
                    }
                    te.my_barrier_state = 0;
                    state = 0;
                    return;
                }

                const int local_abort(abort);
                barrier_counter = num_workers-1;
                te.my_barrier_state = 2;
                if (local_abort == 1) {
                    // already aborted -- start next phase and exit to work-loop
                    state = 2;
                    return;
                }
                if (!te.get_task_queue().empty_safe()) {
                    // we have to abort -- start next phase and exit to work-loop
                    abort = 1;
                    Atomic::memory_fence_producer();
                    state = 2;
                    return;
                }
                // start next phase
                state = 2;
            }

            // in phase 2, but not last -- wait for completion
            for (;;) {
                const int local_abort(abort);
                if (local_abort == 1) {
                    // aborted from elsewhere -- skip spinloop and start workloop
                    return;
                }

                if (!te.get_task_queue().empty()) {
                    abort = 1;
                    Atomic::memory_fence_producer();
                    // we have to abort -- indicate others and start workloop
                    return;
                }

                const int new_local_state(state);
                if (new_local_state != 2) {
                    // completed phase 2 without seeing abort. finish barrier
                    return;
                }
                Atomic::yield();
                Atomic::compiler_fence();
            }
        }

        // first time we see state == 2, and barrier is already aborted

        if (Atomic::decrease_nv(&barrier_counter) == 0) {
            // last into phase 2 -- finish barrier
            te.my_barrier_state = 0;
            state = 0;
        }

        return;
    }
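
Stripped of the abort and task-queue checks that make it restartable, the protocol above is a two-phase counting barrier: phase 1 counts every worker in, phase 2 counts them out, and only the last arrival in each phase advances the shared state. A standalone model with std::atomic (it assumes at least two workers; the real code special-cases a single worker):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<int> state{0};
std::atomic<int> barrier_counter{0};

void wait_at_barrier(int num_workers) {
    // Phase 1: check in.
    if (--barrier_counter == 0) {
        barrier_counter = num_workers - 1; // re-arm the counter for phase 2
        state = 2;                         // last one in starts phase 2
    } else {
        while (state != 2)                 // wait for phase 2 to begin
            std::this_thread::yield();
        // Phase 2: check out.
        if (--barrier_counter == 0) {
            state = 0;                     // last one out finishes the barrier
            return;
        }
    }
    while (state == 2)                     // wait for the barrier to finish
        std::this_thread::yield();
}

int main() {
    const int num_workers = 4;
    barrier_counter = num_workers;
    state = 1; // armed by the main thread, as in barrier() in Example #2

    std::vector<std::thread> workers;
    for (int i = 0; i < num_workers; ++i)
        workers.emplace_back(wait_at_barrier, num_workers);
    for (std::thread& t : workers)
        t.join();

    std::printf("state after barrier: %d\n", state.load()); // prints 0
    return 0;
}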
Example #12
    // Called from TaskExecutor: return true if we are allowed to run tasks
    bool update_barrier_state(TaskExecutor<Options> &te) {
        Atomic::compiler_fence();
        const int local_state(state);

        // return if not in barrier
        if (local_state == 0) {
            if (te.my_barrier_state != 0) {
                te.my_barrier_state = 0;
                te.after_barrier();
            }
            return true;
        }

        // set abort flag if we have tasks
        if (!te.get_task_queue().empty()) {
            if (abort != 1) {
                abort = 1;
                Atomic::memory_fence_producer(); // make sure abort is visible before state changes
            }
        }

        // return if barrier is in same state as last time
        if (te.my_barrier_state == local_state)
            return abort == 1;

        // new state

        te.my_barrier_state = local_state;

        // enter barrier
        const unsigned int local_barrier_counter(Atomic::decrease_nv(&barrier_counter));

        // return if not last
        if (local_barrier_counter != 0)
            return abort == 1;

        // we are last to enter the barrier

        if (local_state == 1) {
            const unsigned int num_workers = tm.get_num_cpus() - 1;
            // if single worker, the barrier is finished
            if (num_workers == 1) {
                te.my_barrier_state = 0;
                state = 0;
                te.after_barrier();
                return true;
            }

            // setup state 2, join it, and return
            te.my_barrier_state = 2;
            barrier_counter = num_workers - 1;
            Atomic::memory_fence_producer(); // make sure barrier_counter is visible before state changes
            state = 2;
            return abort == 1;
        }

        // last in for state 2 -- finish barrier
        te.my_barrier_state = 0;
        state = 0;
        te.after_barrier();
        return true;
    }
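
A note on the primitive both barrier functions lean on: Atomic::decrease_nv appears to be an atomic decrement that returns the new value, so comparing against 0 detects the last arrival. An equivalent in standard C++ (an inference from how it is used above, not the library's definition):

#include <atomic>

// decrease_nv: atomically decrement and return the new ("nv") value.
unsigned int decrease_nv(std::atomic<unsigned int>& counter) {
    return counter.fetch_sub(1) - 1; // fetch_sub returns the old value
}

// Usage, as in both barrier functions above:
//   if (decrease_nv(barrier_counter) == 0) { /* last one in */ }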