Example 1
/*
 * Similar to gettoken but we acquire a shared token instead of an exclusive
 * token.
 */
void
lwkt_gettoken_shared(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking a pool token in shared mode is a bad idea; other
	 * addresses deeper in the call stack may hash to the same pool
	 * token and you may end up with an exclusive-shared livelock.
	 * Warn in this condition.
	 */
	if ((tok >= &pool_tokens[0].token) &&
	    (tok < &pool_tokens[LWKT_NUM_POOL_TOKENS].token))
		kprintf("Warning! Taking pool token %p in shared mode\n", tok);
#endif

	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed, this was not a recursive token, so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;

	if (tokens_debug_output > 0) {
		--tokens_debug_output;
		spin_lock(&tok_debug_spin);
		kprintf("Shar Token thread %p %s %s\n",
			td, tok->t_desc, td->td_comm);
		print_backtrace(6);
		kprintf("\n");
		spin_unlock(&tok_debug_spin);
	}

	lwkt_switch();
	logtoken(succ, ref);
}
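
For context, a shared hold is normally paired with lwkt_reltoken(), which pops the tokref that was pushed above. The following is a hedged sketch, not code from this file: my_token, shared_counter, and read_shared_state() are invented names, and LWKT_TOKEN_INITIALIZER() plus the include choices are assumed to follow the usual DragonFly kernel conventions.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>

/* Hypothetical token and the data it protects (not from the code above). */
static struct lwkt_token my_token = LWKT_TOKEN_INITIALIZER(my_token);
static int shared_counter;

static int
read_shared_state(void)
{
	int value;

	lwkt_gettoken_shared(&my_token);	/* shared mode; may block */
	value = shared_counter;			/* read-only access */
	lwkt_reltoken(&my_token);		/* release pairs with either mode */

	return (value);
}
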
Example 2
/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking an exclusive token after holding it shared will
	 * livelock. Scan for that case and assert.
	 */
	lwkt_tokref_t tk;
	int found = 0;
	for (tk = &td->td_toks_base; tk < ref; tk++) {
		if (tk->tr_tok != tok)
			continue;

		found++;
		if (tk->tr_count & TOK_EXCLUSIVE)
			goto good;
	}
	/* If found > 0 here, we found only shared instances of this token */
	KASSERT((found == 0), ("Token %p s/x livelock", tok));
good:
#endif

	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLUSIVE|TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed, this was not a recursive token, so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;
	lwkt_switch();
	logtoken(succ, ref);
	KKASSERT(tok->t_ref == ref);
}
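
The DEBUG_LOCKS scan above permits recursive exclusive holds (the goto good path) but asserts when the only earlier holds of the same token are shared. A minimal caller-side sketch of that rule, with invented names (my_token, protected_counter, update_state()) and the same assumed DragonFly token API as in the previous sketch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>

static struct lwkt_token my_token = LWKT_TOKEN_INITIALIZER(my_token);
static int protected_counter;

static void
update_state(int v)
{
	lwkt_gettoken(&my_token);	/* exclusive; may lwkt_switch() on contention */
	protected_counter = v;
	lwkt_gettoken(&my_token);	/* recursive exclusive acquire: allowed */
	lwkt_reltoken(&my_token);
	lwkt_reltoken(&my_token);	/* one release per acquire */

	/*
	 * What the DEBUG_LOCKS assertion rejects (do NOT do this):
	 *
	 *	lwkt_gettoken_shared(&my_token);
	 *	lwkt_gettoken(&my_token);	-- shared/exclusive livelock
	 */
}
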
Example 3
/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to re-acquire all
 * tokens that the thread had to release when it switched away.
 *
 * If spinning is non-zero this function acquires the tokens in a particular
 * order to deal with potential deadlocks.  We simply use address order in
 * that case.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td, int spinning)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	if (spinning)
		return(_lwkt_getalltokens_sorted(td));

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Only try really hard on the last token
			 */
			if (scan == td->td_toks_stop - 1) {
			    if (_lwkt_trytokref_spin(scan, td, scan->tr_count))
				    break;
			} else {
			    if (_lwkt_trytokref(scan, td, scan->tr_count))
				    break;
			}

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Release whatever we did get.
			 */
			KASSERT(tok->t_desc,
				("token %p is not initialized", tok));
			strncpy(td->td_gd->gd_cnt.v_lock_name,
				tok->t_desc,
				sizeof(td->td_gd->gd_cnt.v_lock_name) - 1);

			if (lwkt_sched_debug > 0) {
				--lwkt_sched_debug;
				kprintf("toka %p %s %s\n",
					tok, tok->t_desc, td->td_comm);
			}
			td->td_wmesg = tok->t_desc;
			++tok->t_collisions;
			while (--scan >= &td->td_toks_base)
				_lwkt_reltokref(scan, td);
			return(FALSE);
		}
	}
	return (TRUE);
}
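
The loop above is an all-or-nothing acquisition pattern: take each held reference in forward order and, on the first failure, release everything already acquired in reverse order before returning FALSE to the scheduler. A simplified userland model of that pattern, assuming a toy try-lock built on C11 atomics (toylock_t, toy_trylock() and toy_unlock() are invented stand-ins for _lwkt_trytokref()/_lwkt_reltokref()):

#include <stdatomic.h>
#include <stdbool.h>

typedef atomic_flag toylock_t;		/* stand-in for an LWKT token */

static bool
toy_trylock(toylock_t *l)
{
	return (!atomic_flag_test_and_set(l));
}

static void
toy_unlock(toylock_t *l)
{
	atomic_flag_clear(l);
}

/*
 * Acquire every lock or none: on the first failure, release the locks
 * already taken in reverse order and return false, mirroring the
 * "while (--scan >= &td->td_toks_base)" backout above.
 */
static bool
acquire_all(toylock_t *locks[], int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		if (toy_trylock(locks[i]))
			continue;
		while (--i >= 0)
			toy_unlock(locks[i]);
		return (false);		/* caller retries, as the scheduler does */
	}
	return (true);			/* all locks held */
}
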
Example 4
/*
 * This is the decontention version of lwkt_getalltokens().  The tokens are
 * acquired in address-sorted order to deal with any deadlocks.  Ultimately
 * token failures will spin into the scheduler and get here.
 *
 * Called from critical section
 */
static
int
_lwkt_getalltokens_sorted(thread_t td)
{
	lwkt_tokref_t sort_array[LWKT_MAXTOKENS];
	lwkt_tokref_t scan;
	lwkt_token_t tok;
	int i;
	int j;
	int n;

	/*
	 * Sort the token array.  Yah yah, I know this isn't fun.
	 *
	 * NOTE: Recursively acquired tokens are ordered the same as in the
	 *	 td_toks_array so we can always get the earliest one first.
	 */
	i = 0;
	scan = &td->td_toks_base;
	while (scan < td->td_toks_stop) {
		for (j = 0; j < i; ++j) {
			if (scan->tr_tok < sort_array[j]->tr_tok)
				break;
		}
		if (j != i) {
			bcopy(sort_array + j, sort_array + j + 1,
			      (i - j) * sizeof(lwkt_tokref_t));
		}
		sort_array[j] = scan;
		++scan;
		++i;
	}
	n = i;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (i = 0; i < n; ++i) {
		scan = sort_array[i];
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Only try really hard on the last token
			 */
			if (scan == td->td_toks_stop - 1) {
			    if (_lwkt_trytokref_spin(scan, td, scan->tr_count))
				    break;
			} else {
			    if (_lwkt_trytokref(scan, td, scan->tr_count))
				    break;
			}

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Release whatever we did get.
			 */
			if (lwkt_sched_debug > 0) {
				--lwkt_sched_debug;
				kprintf("tokb %p %s %s\n",
					tok, tok->t_desc, td->td_comm);
			}
			td->td_wmesg = tok->t_desc;
			++tok->t_collisions;
			while (--i >= 0) {
				scan = sort_array[i];
				_lwkt_reltokref(scan, td);
			}
			return(FALSE);
		}
	}

	/*
	 * We were successful, there is no need for another core to signal
	 * us.
	 */
	return (TRUE);
}
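
Why address order works: if every spinning thread takes its tokens in ascending address order, no set of threads can each hold one token while waiting on a lower-addressed one held by another, so the decontention path cannot deadlock. The stable insertion sort below mirrors the sorting loop at the top of _lwkt_getalltokens_sorted(); sort_by_address() and its pointer-array interface are invented for the sketch.

#include <stdint.h>

/*
 * Stable insertion sort by address.  Equal entries keep their original
 * order, matching the NOTE above about recursively acquired tokens:
 * only strictly greater entries are shifted to make room.
 */
static void
sort_by_address(void *ptrs[], int n)
{
	int i, j;
	void *key;

	for (i = 1; i < n; ++i) {
		key = ptrs[i];
		for (j = i - 1;
		     j >= 0 && (uintptr_t)ptrs[j] > (uintptr_t)key;
		     --j) {
			ptrs[j + 1] = ptrs[j];
		}
		ptrs[j + 1] = key;
	}
}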