Example #1
__LINK_C void scheduler_run()
{
	while(1)
	{
		while(NG(current_priority) < NUM_PRIORITIES)
		{
			check_structs_are_valid();
			/* drain every task queued at the current priority level */
			for(uint8_t id = pop_task(NG(current_priority)); id != NO_TASK; id = pop_task(NG(current_priority)))
			{
				check_structs_are_valid();
				NG(m_info)[id].task();
			}
			//this must be done atomically: otherwise we risk moving on to a lower
			//priority level while a higher-priority task is still waiting in the queue
			start_atomic();
			if (!tasks_waiting(NG(current_priority)))
				NG(current_priority)++;
#ifndef NDEBUG
			for(int i = 0; i < NG(current_priority); i++)
				assert(!tasks_waiting(i));
#endif
			end_atomic();
		}
		hw_enter_lowpower_mode(FRAMEWORK_SCHEDULER_LP_MODE);
	}

}
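For context, here is a minimal sketch of the queue primitives this loop assumes. The names (pop_task, tasks_waiting, NO_TASK, NUM_PRIORITIES) mirror the excerpt, but the per-priority ring buffer below is an illustrative guess, not the framework's actual implementation, and the NG() global-accessor macro is omitted.

#include <stdint.h>
#include <stddef.h>

/* Illustrative sizes; the real framework defines its own values. */
static const uint8_t NO_TASK = 0xFF;
#define NUM_PRIORITIES 8
#define MAX_TASKS 16

/* One FIFO ring of task ids per priority level. */
struct task_queue {
	uint8_t ids[MAX_TASKS];
	size_t head, tail, count;
};

static struct task_queue queues[NUM_PRIORITIES];

/* True when at least one task is queued at the given priority. */
bool tasks_waiting(int priority)
{
	return queues[priority].count > 0;
}

/* Dequeue the oldest task id at this priority, or NO_TASK if empty. */
uint8_t pop_task(int priority)
{
	struct task_queue *q = &queues[priority];
	if (q->count == 0)
		return NO_TASK;
	uint8_t id = q->ids[q->head];
	q->head = (q->head + 1) % MAX_TASKS;
	q->count--;
	return id;
}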
Example #2
static void
handle_remaining_tasks(ErtsRunQueue *runq, Port *pp)
{
    int i;
    ErtsPortTask *ptp;
    ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq};

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) {
	if (!ptqps[i])
	    continue;

	ptp = pop_task(ptqps[i]);
	while (ptp) {
	    reset_handle(ptp);
	    erts_smp_runq_unlock(runq);

	    switch (ptp->type) {
	    case ERTS_PORT_TASK_FREE:
	    case ERTS_PORT_TASK_TIMEOUT:
		break;
	    case ERTS_PORT_TASK_INPUT:
		erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1);
		break;
	    case ERTS_PORT_TASK_OUTPUT:
		erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1);
		break;
	    case ERTS_PORT_TASK_EVENT:
		erts_stale_drv_select(pp->id, ptp->event, 0, 1);
		break;
	    case ERTS_PORT_TASK_DIST_CMD:
		break;
	    default:
		erl_exit(ERTS_ABORT_EXIT,
			 "Invalid port task type: %d\n",
			 (int) ptp->type);
	    }

	    port_task_free(ptp);

	    erts_smp_runq_lock(runq);
	    ptp = pop_task(ptqps[i]);
	}
    }

    ASSERT(!pp->sched.taskq || !pp->sched.taskq->first);
}
Example #3
threadpool::~threadpool()
{
    // signal that threads should not perform any new work
    shutdown_flag.store(true);

#ifndef USE_YIELD
    {
        std::lock_guard<std::mutex> lock(wakeup_mutex);
        wakeup_flag = true;
        wakeup_signal.notify_all();
    }
#endif

    // wait for in-flight work to complete, then join each thread
    for (auto && thread : threads)
    {
        thread.join();
    }

    auto current_task_package = std::unique_ptr<task_package>{nullptr};

    // signal to each incomplete task that it will not complete due to
    // threadpool destruction
    while (pop_task(current_task_package))
    {
        try
        {
            auto except = std::runtime_error("Could not perform task before threadpool destruction");
            current_task_package->completion_promise.set_exception(std::make_exception_ptr(except));
        }
        catch (...) { }
    }
}
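The destructor drains the queue through the same pop_task helper the worker threads use. Here is a hedged reconstruction of that helper, assuming a mutex-guarded std::queue; the real 'tasks' member (constructed with a queue_size) may well be a bounded or lock-free queue, and 'queue_mutex'/'pending' are hypothetical names.

#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>

// Fields inferred from how the pool uses a package: it invokes
// ->task() and settles ->completion_promise.
struct task_package {
    std::function<void()> task;
    std::promise<void> completion_promise;
};

static std::mutex queue_mutex;                             // hypothetical
static std::queue<std::unique_ptr<task_package>> pending;  // hypothetical

// Move one queued package into `out` and report whether anything was
// taken, so the caller ends up holding the only reference to it.
bool pop_task(std::unique_ptr<task_package> & out)
{
    std::lock_guard<std::mutex> lock(queue_mutex);
    if (pending.empty())
        return false;
    out = std::move(pending.front());
    pending.pop();
    return true;
}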
Example #4
void gpu_worker(void *arg)
{
	hs_worker *worker_arg = (hs_worker *) arg;

	bind_to_cpu(worker_arg);

	init_cuda(worker_arg->device_id);

	pthread_mutex_lock(&worker_arg->mutex);
	worker_arg->initialized = 1;
	pthread_cond_signal(&worker_arg->ready);
	pthread_mutex_unlock(&worker_arg->mutex);

	_task_t task;
	while (is_running())
	{
		lock_queue(worker_arg->task_queue);

		task = pop_task(worker_arg->task_queue);

		if (task == NULL)
		{
			if (is_running())
				sleep_worker(worker_arg);

			unlock_queue(worker_arg->task_queue);
			continue;
		}

		unlock_queue(worker_arg->task_queue);

		/* task cannot run on this worker's architecture: requeue it and move on */
		if ((task->task->arch_type & worker_arg->arch) != worker_arg->arch)
		{
			push_task(worker_arg->task_queue, task);
			continue;
		}

		execute_task(worker_arg, task);
	}

	deinit_cuda();

	pthread_exit((void*) 0);
}
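The mutex/condition handshake at the top of gpu_worker implies a matching wait on the launcher side. Here is a sketch of that counterpart, assuming only the three hs_worker fields the excerpt touches (the full struct is not shown, and wait_worker_ready is a hypothetical name):

#include <pthread.h>

/* Only the fields the handshake touches; the real hs_worker has more. */
struct hs_worker_sync {
	pthread_mutex_t mutex;
	pthread_cond_t  ready;
	int             initialized;
};

/* Block until gpu_worker has bound its CPU and initialized CUDA. */
void wait_worker_ready(struct hs_worker_sync *w)
{
	pthread_mutex_lock(&w->mutex);
	while (!w->initialized)  /* loop guards against spurious wakeups */
		pthread_cond_wait(&w->ready, &w->mutex);
	pthread_mutex_unlock(&w->mutex);
}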
Example #5
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
	res = 0;
	goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
	erts_smp_runq_unlock(runq);
	erts_smp_port_lock(pp);
	erts_smp_runq_lock(runq);
    }
    
    if (erts_sched_stat.enabled) {
	ErtsSchedulerData *esdp = erts_get_scheduler_data();
	Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
	int migrated = old && old != esdp->no;

	erts_smp_spin_lock(&erts_sched_stat.lock);
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
	if (migrated) {
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
	}
	erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
	ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

	reset_handle(ptp);
	erts_smp_runq_unlock(runq);

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	ASSERT(pp->drv_ptr);

	switch (ptp->type) {
	case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
	    reds += ERTS_PORT_REDS_FREE;
	    erts_smp_runq_lock(runq);

	    erts_unblock_fpe(fpe_was_unmasked);
	    ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
	    if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
		handle_remaining_tasks(runq, pp);
	    ASSERT(!ptqp->first
		   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
	    erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
	    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
	    erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);	    
#endif

	    port_task_free(ptp);
	    if (pp->sched.taskq)
		port_taskq_free(pp->sched.taskq);
	    pp->sched.taskq = NULL;

	    goto tasks_done;
	case ERTS_PORT_TASK_TIMEOUT:
	    reds += ERTS_PORT_REDS_TIMEOUT;
	    if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
		(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
	    break;
	case ERTS_PORT_TASK_INPUT:
	    reds += ERTS_PORT_REDS_INPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    /* NOTE: some Windows drivers use ->ready_input for both input and output */
	    (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_OUTPUT:
	    reds += ERTS_PORT_REDS_OUTPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_EVENT:
	    reds += ERTS_PORT_REDS_EVENT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_DIST_CMD:
	    reds += erts_dist_command(pp, CONTEXT_REDS-reds);
	    break;
	default:
	    erl_exit(ERTS_ABORT_EXIT,
		     "Invalid port task type: %d\n",
		     (int) ptp->type);
	    break;
	}

	if ((pp->status & ERTS_PORT_SFLG_CLOSING)
	    && erts_is_port_ioq_empty(pp)) {
	    reds += ERTS_PORT_REDS_TERMINATE;
	    erts_terminate_port(pp);
	}

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
	if (pp->xports)
	    erts_smp_xports_unlock(pp);
	ASSERT(!pp->xports);
#endif

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

	port_task_free(ptp);

	erts_smp_runq_lock(runq);

	ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	       >= io_tasks_executed);
	erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
				 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
	ASSERT(pp->sched.exe_taskq);
	pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
	ErtsRunQueue *xrunq;
#endif

	ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
	ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
	xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (!xrunq) {
#endif
	    enqueue_port(runq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
	}
	else {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    enqueue_port(xrunq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    erts_smp_runq_unlock(xrunq);
	    erts_smp_notify_inc_runq(xrunq);
	}
#endif
	port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	   != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
    	profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports(pp, am_out);
    }
#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
	erts_aint_t refc;
	erts_smp_mtx_unlock(pp->lock);
	refc = erts_smp_atomic_dec_read_nob(&pp->refc);
	ASSERT(refc >= 0);
	if (refc == 0) {
	    erts_smp_runq_unlock(runq);
	    erts_port_cleanup(pp); /* Might acquire runq lock */
	    erts_smp_runq_lock(runq);
	    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
		   != (erts_aint_t) 0);
	}
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
Example #6
/*
 * The worker thread function. Takes a task from the queue and performs it
 * if one is available. Otherwise, the thread puts itself on the idle thread
 * list and waits for a signal to wake up.
 * When asked to stop after finishing a task, the thread terminates directly
 * by detaching and exiting. Otherwise, it remains on the idle thread list
 * and should be joined.
 */
static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
{
    apr_thread_pool_t *me = param;
    apr_thread_pool_task_t *task = NULL;
    apr_interval_time_t wait;
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);

    --me->spawning_cnt;

    elt = elt_new(me, t);
    if (!elt) {
        apr_thread_mutex_unlock(me->lock);
        apr_thread_exit(t, APR_ENOMEM);
    }

    while (!me->terminated && elt->state != TH_STOP) {
        /* If this is not a new element, it has been awakened from idle */
        if (APR_RING_NEXT(elt, link) != elt) {
            --me->idle_cnt;
            APR_RING_REMOVE(elt, link);
        }

        APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
        task = pop_task(me);
        while (NULL != task && !me->terminated) {
            ++me->tasks_run;
            elt->current_owner = task->owner;
            apr_thread_mutex_unlock(me->lock);
            apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
            task->func(t, task->param);
            apr_thread_mutex_lock(me->lock);
            APR_RING_INSERT_TAIL(me->recycled_tasks, task,
                                 apr_thread_pool_task, link);
            elt->current_owner = NULL;
            if (TH_STOP == elt->state) {
                break;
            }
            task = pop_task(me);
        }
        assert(NULL == elt->current_owner);
        if (TH_STOP != elt->state)
            APR_RING_REMOVE(elt, link);

        /* Check whether a busy thread has been asked to stop; it is not joinable */
        if ((me->idle_cnt >= me->idle_max
             && !(me->scheduled_task_cnt && 0 >= me->idle_max)
             && !me->idle_wait)
            || me->terminated || elt->state != TH_RUN) {
            --me->thd_cnt;
            if ((TH_PROBATION == elt->state) && me->idle_wait)
                ++me->thd_timed_out;
            APR_RING_INSERT_TAIL(me->recycled_thds, elt,
                                 apr_thread_list_elt, link);
            apr_thread_mutex_unlock(me->lock);
            apr_thread_detach(t);
            apr_thread_exit(t, APR_SUCCESS);
            return NULL;        /* should not be reached; safety net */
        }

        /* busy thread becomes idle */
        ++me->idle_cnt;
        APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);

        /*
         * If there is a scheduled task, always use a timed wait so this
         * thread can perform it: there is no guarantee that the currently
         * idle threads will be scheduled for the next scheduled task.
         */
        if (me->scheduled_task_cnt)
            wait = waiting_time(me);
        else if (me->idle_cnt > me->idle_max) {
            wait = me->idle_wait;
            elt->state = TH_PROBATION;
        }
        else
            wait = -1;

        if (wait >= 0) {
            apr_thread_cond_timedwait(me->cond, me->lock, wait);
        }
        else {
            apr_thread_cond_wait(me->cond, me->lock);
        }
    }

    /* idle thread been asked to stop, will be joined */
    --me->thd_cnt;
    apr_thread_mutex_unlock(me->lock);
    apr_thread_exit(t, APR_SUCCESS);
    return NULL;                /* should not be reached; safety net */
}
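From the caller's side, work enters this loop through the pool's public API. A minimal sketch using APR-util's apr_thread_pool_create/apr_thread_pool_push; the task body and its message are illustrative, and error handling is elided:

#include <stdio.h>
#include <apr_general.h>
#include <apr_thread_pool.h>

/* Illustrative task body; the pool hands it the worker thread and our param. */
static void *APR_THREAD_FUNC say_hello(apr_thread_t *t, void *param)
{
    (void) t;
    printf("hello from task: %s\n", (const char *) param);
    return NULL;
}

int main(void)
{
    apr_pool_t *mem;
    apr_thread_pool_t *pool;

    apr_initialize();
    apr_pool_create(&mem, NULL);

    /* Two initial threads, at most four. */
    apr_thread_pool_create(&pool, 2, 4, mem);

    /* Queue a task; an idle worker picks it up via pop_task() above. */
    apr_thread_pool_push(pool, say_hello, (void *) "demo",
                         APR_THREAD_TASK_PRIORITY_NORMAL, NULL);

    apr_thread_pool_destroy(pool);
    apr_terminate();
    return 0;
}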
Example #7
threadpool::threadpool(size_t concurrency, size_t queue_size) :
    tasks(queue_size),
    shutdown_flag(false),
    threads()
#ifndef USE_YIELD
    ,wakeup_flag(false),
    wakeup_signal(),
    wakeup_mutex()
#endif
{
    // Reserving is more efficient than creating the 'threads' vector at
    // full size and populating it with std::generate, since that would
    // construct std::thread objects only to replace them immediately
    threads.reserve(concurrency);

    for (auto a = zero(concurrency); a < concurrency; ++a)
    {
        // emplace_back so thread is constructed in place
        threads.emplace_back([this]()
            {
                // check whether the parent threadpool is being destroyed;
                // if it is, stop running
                while (!shutdown_flag.load())
                {
                    auto current_task_package = std::unique_ptr<task_package>{nullptr};

                    // use pop_task so we only ever have one reference to the
                    // task_package
                    if (pop_task(current_task_package))
                    {
                        try
                        {
                            current_task_package->task();
                            current_task_package->completion_promise.set_value();
                        }
                        catch (...)
                        {
                            // try and tell the owner that something bad has happened...
                            try
                            {
                                // ...but this can also throw, so stay protected
                                current_task_package->completion_promise.set_exception(std::current_exception());
                            }
                            catch (...) { }
                        }
                    }
                    else
                    {
                        // rather than spinning, give up thread time to other things
#ifdef USE_YIELD
                        std::this_thread::yield();
#else
                        auto lock = std::unique_lock<std::mutex>(wakeup_mutex);

                        wakeup_flag = false;

                        wakeup_signal.wait(lock, [this](){ return wakeup_flag; });
#endif
                    }
                }
            });

    }
}
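The non-USE_YIELD paths in this constructor and the destructor together implement the standard lost-wakeup-safe condition-variable protocol: raise a flag and notify while holding the mutex, and wait on a predicate. A self-contained sketch of that protocol, with post/take as illustrative names rather than this codebase's API:

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

std::mutex wakeup_mutex;
std::condition_variable wakeup_signal;
bool wakeup_flag = false;
std::queue<std::function<void()>> tasks;

// Producer: publish work, then raise the flag and notify while still
// holding the mutex, so a worker checking the flag cannot miss a wakeup.
void post(std::function<void()> fn)
{
    std::lock_guard<std::mutex> lock(wakeup_mutex);
    tasks.push(std::move(fn));
    wakeup_flag = true;
    wakeup_signal.notify_one();
}

// Consumer: wait on a predicate; spurious wakeups simply re-check the flag.
std::function<void()> take()
{
    std::unique_lock<std::mutex> lock(wakeup_mutex);
    wakeup_signal.wait(lock, []{ return wakeup_flag; });
    std::function<void()> fn = std::move(tasks.front());
    tasks.pop();
    if (tasks.empty())
        wakeup_flag = false;
    return fn;
}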