Example #1
void
erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
{
    ErtsThrPrgrData *tpd = thr_prgr_data(esdp);
    ErtsThrPrgrVal current, val;

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif

    /*
     * We aren't allowed to continue until our thread
     * progress is past global current.
     */
    val = current = read_acqb(&erts_thr_prgr__.current);
    while (1) {
	val++;
	if (val == ERTS_THR_PRGR_VAL_WAITING)
	    val = 0;
	tpd->confirmed = val;
	set_mb(&intrnl->thr[tpd->id].data.current, val);
	val = read_acqb(&erts_thr_prgr__.current);
	if (current == val)
	    break;
	current = val;
    }
    if (block_count_inc())
	block_thread(tpd);
    if (update(tpd))
	leader_update(tpd);
}
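The `val++` wrap that skips the reserved WAITING value recurs in example #13 below. As a standalone illustration, the increment amounts to the following sketch (the names are illustrative stand-ins, not OTP's):

/* Advance a progress value while skipping the reserved WAITING
 * sentinel on wrap-around; a sketch of the pattern above, not
 * OTP code. */
#include <stdint.h>

#define PRGR_VAL_WAITING ((uint64_t)-1)

static uint64_t prgr_next(uint64_t val)
{
    val++;
    if (val == PRGR_VAL_WAITING)
        val = 0; /* never hand out the sentinel */
    return val;
}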
Example #2
    thread_id_t iteration_begin(iteration_t iter)
    {
        iter_ = iter;
        running_threads_count = thread_count;
        finished_thread_count_ = 0;
        timed_thread_count_ = 0;
        spurious_thread_count_ = 0;
        dynamic_thread_count_ = 0;

        for (thread_id_t i = 0; i != thread_count; ++i)
        {
            running_threads.push_back(i);
            threads_[i].reset(params_);
        }

        for (thread_id_t i = thread_count - total_dynamic_threads_; i != thread_count; ++i)
        {
            dynamic_threads_[dynamic_thread_count_++] = &threads_[i];
            block_thread(i, false);
        }

        thread_id_t const th = self().iteration_begin_impl();

        thread_ = &threads_[th];

        return th;
    }
Example #3
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = __running_self_entry();

    if(current == m->blocker.thread)
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(m->blocker.thread == NULL))
    {
        /* lock is open */
        m->blocker.thread = current;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    disable_irq();
    block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
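The block path above parks the caller on m->queue and relies on the release side to hand ownership to the next waiter; that matching mutex_unlock is not shown. A minimal portable analogue of the same acquire-or-block pattern, using POSIX primitives in place of Rockbox's corelock and wait-queue machinery (an illustrative sketch, not Rockbox's implementation):

/* Portable analogue of the acquire-or-block pattern above; the
 * pthread mutex plays the role of the corelock, the condvar plays
 * the role of the wait queue. Not Rockbox's actual code. */
#include <pthread.h>

struct simple_mutex {
    pthread_mutex_t cl;       /* "corelock" */
    pthread_cond_t  queue;    /* "wait queue" */
    pthread_t       owner;
    int             locked;
    int             recursion;
};

void simple_mutex_lock(struct simple_mutex *m)
{
    pthread_t self = pthread_self();

    pthread_mutex_lock(&m->cl);
    if (m->locked && pthread_equal(m->owner, self)) {
        m->recursion++;                 /* current thread already owns it */
    } else {
        while (m->locked)               /* block until the lock is open */
            pthread_cond_wait(&m->queue, &m->cl);
        m->locked = 1;
        m->owner = self;
    }
    pthread_mutex_unlock(&m->cl);
}

void simple_mutex_unlock(struct simple_mutex *m)
{
    pthread_mutex_lock(&m->cl);
    if (m->recursion > 0) {
        m->recursion--;                 /* unwind a recursive hold */
    } else {
        m->locked = 0;
        pthread_cond_signal(&m->queue); /* turn control over to a waiter */
    }
    pthread_mutex_unlock(&m->cl);
}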
Example #4
    thread_finish_result thread_finished()
    {
        RL_VERIFY(thread_->state_ == thread_state_running);
        block_thread(thread_->index_, false);
        thread_->state_ = thread_state_finished;
        finished_thread_count_ += 1;
        self().thread_finished_impl();
retry:
        if (finished_thread_count_ == thread_count)
        {
            return thread_finish_result_last;
        }
        else if (is_deadlock())
        {
            if (dynamic_thread_count_)
            {
                while (dynamic_thread_count_)
                {
                    thread_info_t* th = dynamic_threads_[--dynamic_thread_count_];
                    unblock_thread(th->index_);
                }
                goto retry;
            }
            return thread_finish_result_deadlock;
        }
        else
        {
            return thread_finish_result_normal;
        }
    }
Example #5
int block_threads(torque_ctx *ctx){
	int ret = 0;

	if(ctx->ev){
		ret = block_thread(ctx->ev->nexttid);
	}
	return ret;
}
Example #6
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret = OBJ_WAIT_TIMEDOUT;

    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    int count = s->count;
    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout != 0)
    {
        ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);

        /* too many waits - block until count is upped... */
        struct thread_entry *current = __running_self_entry();

        block_thread(current, timeout, &s->queue, NULL);
        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        /* if an explicit wake was indicated, do no more */
        if(LIKELY(!wait_queue_ptr(current)))
            return OBJ_WAIT_SUCCEEDED;

        disable_irq();
        corelock_lock(&s->cl);

        /* see if anyone got us after the expired wait */
        if(wait_queue_try_remove(current))
        {
            count = s->count;
            if(count > 0)
            {
                /* belatedly down it */
                s->count = count - 1;
                ret = OBJ_WAIT_SUCCEEDED;
            }
        }
    }
    /* else just polling it */

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
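The timed branch above parks the thread for up to 'timeout' ticks and then re-checks whether it was explicitly woken just as the wait expired. A portable sketch of the same down-or-timed-wait semantics, with pthread_cond_timedwait standing in for block_thread (an analogue, not the Rockbox code):

/* Portable sketch of the down-or-timed-wait semantics above;
 * illustrative only, not Rockbox's semaphore_wait. */
#include <pthread.h>
#include <errno.h>
#include <time.h>

struct simple_sem {
    pthread_mutex_t cl;
    pthread_cond_t  queue;
    int             count;
};

/* Returns 0 on success, -1 on timeout (stand-ins for OBJ_WAIT_*). */
int simple_sem_wait(struct simple_sem *s, const struct timespec *deadline)
{
    int ret = 0;

    pthread_mutex_lock(&s->cl);
    while (s->count == 0) {
        if (pthread_cond_timedwait(&s->queue, &s->cl, deadline) == ETIMEDOUT) {
            /* see if anyone upped the count as the wait expired */
            if (s->count == 0)
                ret = -1;
            break;
        }
    }
    if (ret == 0)
        s->count--;                 /* count is not zero; down it */
    pthread_mutex_unlock(&s->cl);
    return ret;
}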
Example #7
void thread_wait(unsigned int thread_id)
{
    struct thread_entry *current = cores[CURRENT_CORE].running;
    struct thread_entry *thread = thread_id_entry(thread_id);

    if (thread->id == thread_id && thread->state != STATE_KILLED)
    {
        current->bqp = &thread->queue;
        block_thread(current);
        switch_thread();
    }
}
Example #8
/*
 *	Decrements the semaphore's value and blocks the calling thread
 *	   if the value drops below zero
 *
 *	@Param  - Pointer to the semaphore that will impose waiting
 */
void mysem_wait(Semaphore* sem){
	DISABLE_INTERRUPTS();
	sem->value--;
	tcb* currentThread = get_mythread();
	if(sem->value < 0){
		mythread_block(currentThread);
		block_thread(sem, currentThread);
		mysem_printQueue(sem);
	}
	ENABLE_INTERRUPTS();
	// Stall the thread until the thread becomes READY
	while(currentThread->state==BLOCKED){
		asm("nop");
	}
}
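The wait side leaves the thread spinning until its state flips from BLOCKED to READY, so the post side must both increment the value and ready the first queued thread. That function is not shown; a plausible counterpart would look like the sketch below, where unblock_thread() and mythread_unblock() are hypothetical names mirroring the snippet's (unshown) helpers:

/* Hypothetical counterpart to mysem_wait above: ups the value and
 * readies the first queued thread so its BLOCKED spin-loop exits.
 * unblock_thread() and mythread_unblock() are assumed helpers, not
 * names from the original project. */
void mysem_post(Semaphore* sem){
	DISABLE_INTERRUPTS();
	sem->value++;
	if(sem->value <= 0){
		/* someone is waiting: dequeue it and mark it READY */
		tcb* waiter = unblock_thread(sem);
		mythread_unblock(waiter);
	}
	ENABLE_INTERRUPTS();
}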
Example #9
static inline int
reap_thread(pthread_t tid){
	int ret = 0;

	// POSIX has a special case for process-autodirected signals; kill(2)
	// does not return until at least one signal is delivered. This works
	// around a race condition (see block_thread()) *most* of the time.
	// There's two problems with it: should some other thread have
	// EVTHREAD_TERM unblocked, this breaks (but they shouldn't be doing
	// that anyway), and should some other signal already be pending, it
	// breaks (we're only guaranteed that *one* signal gets delivered
	// before we return, so we can still hit the block_thread() early).
	ret |= kill(getpid(),EVTHREAD_TERM);
	ret |= block_thread(tid);
	return ret;
}
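The comment describes a handshake in which the reaped thread keeps EVTHREAD_TERM blocked and consumes it synchronously; the race arises because kill(2) only guarantees that one signal is delivered before returning. A minimal sketch of the parking side of such a handshake, with SIGUSR1 standing in for EVTHREAD_TERM (this is an assumption about the shape of block_thread()'s counterpart, not this project's source):

/* Sketch of signal-based parking: the worker keeps the termination
 * signal blocked and consumes it with sigwait(). SIGUSR1 stands in
 * for EVTHREAD_TERM; illustrative only. */
#include <signal.h>
#include <pthread.h>

static void *worker(void *arg)
{
    sigset_t set;
    int sig;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    /* keep it blocked so it can only ever be consumed here; a thread
     * that unblocks it breaks the handshake, as the comment warns */
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    /* ... event loop ... */

    sigwait(&set, &sig);   /* park until the termination signal arrives */
    return arg;
}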
Example #10
static erts_aint32_t
thr_progress_block(ErtsThrPrgrData *tpd, int wait)
{
    erts_tse_t *event = NULL; /* Remove erroneous warning... sigh... */
    erts_aint32_t lflgs, bc;

    if (tpd->is_blocking++)
	return (erts_aint32_t) 0;

    while (1) {
	lflgs = erts_atomic32_read_bor_nob(&intrnl->misc.data.lflgs,
					   ERTS_THR_PRGR_LFLG_BLOCK);
	if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
	    block_thread(tpd);
	else
	    break;
    }

#if ERTS_THR_PRGR_PRINT_BLOCKERS
    erts_fprintf(stderr, "block(%d)\n", tpd->id);
#endif

    ASSERT(ERTS_AINT_NULL
	   == erts_atomic_read_nob(&intrnl->misc.data.blocker_event));

    if (wait) {
	event = erts_tse_fetch();
	erts_tse_reset(event);
	erts_atomic_set_nob(&intrnl->misc.data.blocker_event,
			    (erts_aint_t) event);
    }
    if (tpd->is_managed)
	erts_atomic32_dec_nob(&intrnl->misc.data.block_count);
    bc = erts_atomic32_read_band_mb(&intrnl->misc.data.block_count,
				    ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING);
    bc &= ~ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING;
    if (wait) {
	while (bc != 0) {
	    erts_tse_wait(event);
	    erts_tse_reset(event);
	    bc = erts_atomic32_read_acqb(&intrnl->misc.data.block_count);
	}
    }
    return bc;
}
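thr_progress_block sets a global BLOCK flag, counts down the threads that still have to park, and waits on an event until the count drains to zero. A compact portable analogue of that block-count protocol, with a mutex/condvar pair in place of the atomics and tse events (illustrative, not the OTP code):

/* Portable analogue of the block-count protocol above: the blocker
 * raises a flag and waits for every worker to park; workers check
 * the flag at their checkpoints. Initialize 'running' to the number
 * of workers. Illustrative only. */
#include <pthread.h>

struct blocker {
    pthread_mutex_t lk;
    pthread_cond_t  cv;
    int             block;    /* the BLOCK flag */
    int             running;  /* workers not yet parked */
};

void blocker_block(struct blocker *b)        /* called by the blocker */
{
    pthread_mutex_lock(&b->lk);
    b->block = 1;                             /* raise the BLOCK flag */
    while (b->running > 0)                    /* wait for the count to drain */
        pthread_cond_wait(&b->cv, &b->lk);
    pthread_mutex_unlock(&b->lk);
}

void blocker_checkpoint(struct blocker *b)    /* called by each worker */
{
    pthread_mutex_lock(&b->lk);
    if (b->block) {
        if (--b->running == 0)
            pthread_cond_broadcast(&b->cv);   /* last parker wakes blocker */
        while (b->block)                      /* stay parked until unblocked */
            pthread_cond_wait(&b->cv, &b->lk);
        b->running++;                         /* back to running */
    }
    pthread_mutex_unlock(&b->lk);
}

/* The matching unblock (not shown) clears 'block' and broadcasts. */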
Example #11
    bool park_current_thread(bool is_timed, bool allow_spurious_wakeup)
    {
        if (is_timed)
        {
            timed_threads_[timed_thread_count_++] = thread_;
            RL_VERIFY(timed_thread_count_ <= thread_count);
        }

        if (allow_spurious_wakeup)
        {
            spurious_threads_[spurious_thread_count_++] = thread_;
            RL_VERIFY(spurious_thread_count_ <= thread_count);
        }

        block_thread(thread_->index_, true);

        return is_deadlock() ? false : true;
    }
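park_current_thread records timed and spurious-wakeup candidates before blocking, then reports failure if parking this thread deadlocks the simulated schedule. A plausible shape for that check, inferred from the bookkeeping above (an assumption about Relacy's internals, not its actual implementation): the iteration is stuck only when no thread is runnable and neither list offers a thread that could still be forced awake.

/* Plausible shape of is_deadlock(), inferred from the counters
 * above; an assumption, not Relacy's source. */
int is_deadlock_sketch(unsigned running_threads_count,
                       unsigned timed_thread_count,
                       unsigned spurious_thread_count)
{
    return running_threads_count == 0
        && timed_thread_count == 0
        && spurious_thread_count == 0;
}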
Example #12
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();
        IF_COP( current->obj_cl = &s->cl; )
        current->bqp = &s->queue;
        /* return value will be OBJ_WAIT_SUCCEEDED after wait if wake was
         * explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current);                /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
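Both semaphore_wait variants preset retval to OBJ_WAIT_TIMEDOUT and rely on the release side to flip it to success on an explicit wake. A portable sketch of that release, reusing the simple_sem type from the sketch after example #6 (an analogue, not Rockbox's semaphore_release):

/* Portable sketch of the release side implied above: up the count
 * and wake one waiter, if any. Not Rockbox's actual code. */
void simple_sem_post(struct simple_sem *s)
{
    pthread_mutex_lock(&s->cl);
    s->count++;                      /* up the count... */
    pthread_cond_signal(&s->queue);  /* ...and wake one waiter, if any */
    pthread_mutex_unlock(&s->cl);
}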
Example #13
static ERTS_INLINE int
leader_update(ErtsThrPrgrData *tpd)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    if (!tpd->leader) {
	/* Probably need to block... */
	block_thread(tpd);
    }
    else {
	ErtsThrPrgrVal current;
	int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
	erts_aint32_t lflgs;
	ErtsThrPrgrVal next;
	erts_aint_t refc;

	my_ix = tpd->id;

	if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
	    /* Took over as leader from another thread */
	    tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
	    tpd->leader_state.next = tpd->leader_state.current;
	    tpd->leader_state.next++;
	    if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
		tpd->leader_state.next = 0;
	    tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
	    tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
	    tpd->leader_state.umrefc_ix.current =
		(int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);

	    if (tpd->confirmed == tpd->leader_state.current) {
		ErtsThrPrgrVal val = tpd->leader_state.current + 1;
		if (val == ERTS_THR_PRGR_VAL_WAITING)
		    val = 0;
		tpd->confirmed = val;
		set_mb(&intrnl->thr[my_ix].data.current, val);
	    }
	}


	next = tpd->leader_state.next;

	waiting_unmanaged = 0;
	umrefc_ix = -1; /* Shut up annoying warning */

	chk_next_ix = tpd->leader_state.chk_next_ix;
	no_managed = intrnl->managed.no;
	ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
	/* Check managed threads */
	if (chk_next_ix < no_managed) {
	    for (ix = chk_next_ix; ix < no_managed; ix++) {
		ErtsThrPrgrVal tmp;
		if (ix == my_ix)
		    continue;
		tmp = read_nob(&intrnl->thr[ix].data.current);
		if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
		    tpd->leader_state.chk_next_ix = ix;
		    ASSERT(erts_thr_progress_has_passed__(next, tmp));
		    goto done;
		}
	    }
	}

	/* Check unmanaged threads */
	waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
	umrefc_ix = (waiting_unmanaged
		     ? tpd->leader_state.umrefc_ix.waiting
		     : tpd->leader_state.umrefc_ix.current);
	refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	ASSERT(refc >= 0);
	if (refc != 0) {
	    int new_umrefc_ix;

	    if (waiting_unmanaged)
		goto done;

	    new_umrefc_ix = (umrefc_ix + 1) & 0x1;
	    tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
	    tpd->leader_state.chk_next_ix = no_managed;
	    erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
				  (erts_aint32_t) new_umrefc_ix);
	    ETHR_MEMBAR(ETHR_StoreLoad);
	    refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	    ASSERT(refc >= 0);
	    waiting_unmanaged = 1;
	    if (refc != 0)
		goto done;
	}

	/* Make progress */
	current = next;

	next++;
	if (next == ERTS_THR_PRGR_VAL_WAITING)
	    next = 0;

	set_nob(&intrnl->thr[my_ix].data.current, next);
	set_mb(&erts_thr_prgr__.current, current);
	tpd->confirmed = next;
	tpd->leader_state.next = next;
	tpd->leader_state.current = current;

#if ERTS_THR_PRGR_PRINT_VAL
	if (current % 1000 == 0)
	    erts_fprintf(stderr, "%b64u\n", current);
#endif
	handle_wakeup_requests(current);

	if (waiting_unmanaged) {
	    waiting_unmanaged = 0;
	    tpd->leader_state.umrefc_ix.waiting = -1;
	    erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
					~ERTS_THR_PRGR_LFLG_WAITING_UM);
	}
	tpd->leader_state.chk_next_ix = 0;

    done:

	if (tpd->active) {
	    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		(void) block_thread(tpd);
	}
	else {
	    int force_wakeup_check = 0;
	    erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
	    tpd->leader = 0;
	    tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
	    erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif

	    ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);

	    intrnl->misc.data.umrefc_ix.waiting
		= tpd->leader_state.umrefc_ix.waiting;
	    if (waiting_unmanaged)
		set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;

	    lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
						set_flags);
	    lflgs |= set_flags;
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		lflgs = block_thread(tpd);

	    if (waiting_unmanaged) {
		/* Need to check umrefc again */
		ETHR_MEMBAR(ETHR_StoreLoad);
		refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
		if (refc == 0) {
		    /* Need to force wakeup check */
		    force_wakeup_check = 1;
		}
	    }

	    if ((force_wakeup_check
		 || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
			       | ERTS_THR_PRGR_LFLG_WAITING_UM
			       | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
		     == ERTS_THR_PRGR_LFLG_NO_LEADER))
		&& got_sched_wakeups()) {
		/* Someone needs to make progress */
		wakeup_managed(0);
	    }
	}
    }

    return tpd->leader;
}
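The unmanaged-thread check above flips umrefc_ix.current to the other slot, issues a StoreLoad barrier, and then re-reads the old slot's refcount until it drains, much like an RCU-style epoch flip. A compact C11 sketch of that two-slot pattern (illustrative names, not OTP's; OTP additionally tracks a waiting index so the drain can resume across leader changes):

/* Two-slot refcount flip: readers enter the current slot; the leader
 * flips the index, fences, and checks whether the old slot drained.
 * Illustrative sketch, not OTP code. */
#include <stdatomic.h>

static _Atomic int umrefc[2];   /* per-slot reader counts */
static _Atomic int umrefc_ix;   /* which slot new readers enter */

void reader_enter(int *ix)      /* unmanaged thread enters */
{
    *ix = atomic_load(&umrefc_ix);
    atomic_fetch_add(&umrefc[*ix], 1);
}

void reader_leave(int ix)
{
    atomic_fetch_sub(&umrefc[ix], 1);
}

int leader_flip_and_drained(void)
{
    int old_ix = atomic_load(&umrefc_ix);
    atomic_store(&umrefc_ix, old_ix ^ 1);       /* new readers go elsewhere */
    atomic_thread_fence(memory_order_seq_cst);  /* the StoreLoad barrier */
    return atomic_load(&umrefc[old_ix]) == 0;   /* old slot drained? */
}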