/*
 * Similar to gettoken but we acquire a shared token instead of an exclusive
 * token.
 *
 * The tokref is pushed onto the per-thread token stack (td_toks_stop)
 * *before* the acquisition attempt so the scheduler can complete the
 * acquisition on our behalf if we have to block.
 */
void
lwkt_gettoken_shared(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	/*
	 * Make the tokref visible on our stack before initializing it.
	 * cpu_ccfence() prevents the compiler from reordering the
	 * increment past the initialization below.
	 */
	++td->td_toks_stop;
	cpu_ccfence();
	/* TOK_EXCLREQ only (no TOK_EXCLUSIVE): this is a shared request */
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking a pool token in shared mode is a bad idea; other
	 * addresses deeper in the call stack may hash to the same pool
	 * token and you may end up with an exclusive-shared livelock.
	 * Warn in this condition.
	 */
	if ((tok >= &pool_tokens[0].token) &&
	    (tok < &pool_tokens[LWKT_NUM_POOL_TOKENS].token))
		kprintf("Warning! Taking pool token %p in shared mode\n", tok);
#endif

	/* Fast path: spin briefly for the token before giving up the cpu */
	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed this was not a recursive token so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;

	/* Rate-limited contention diagnostics (tokens_debug_output knob) */
	if (tokens_debug_output > 0) {
		--tokens_debug_output;
		spin_lock(&tok_debug_spin);
		kprintf("Shar Token thread %p %s %s\n",
			td, tok->t_desc, td->td_comm);
		print_backtrace(6);
		kprintf("\n");
		spin_unlock(&tok_debug_spin);
	}

	/* Block; the scheduler acquires the token before resuming us */
	lwkt_switch();
	logtoken(succ, ref);
}
/*
 * Get a serializing token.  This routine can block.
 *
 * The tokref is pushed onto the per-thread token stack (td_toks_stop)
 * before the acquisition attempt so the scheduler can complete the
 * acquisition on our behalf if we have to block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	/*
	 * Publish the tokref slot before initializing it; cpu_ccfence()
	 * keeps the compiler from reordering the increment past the init.
	 */
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, TOK_EXCLUSIVE|TOK_EXCLREQ);

#ifdef DEBUG_LOCKS
	/*
	 * Taking an exclusive token after holding it shared will
	 * livelock.  Scan for that case and assert.
	 */
	lwkt_tokref_t tk;
	int found = 0;
	for (tk = &td->td_toks_base; tk < ref; tk++) {
		if (tk->tr_tok != tok)
			continue;
		found++;
		if (tk->tr_count & TOK_EXCLUSIVE)
			goto good;
	}
	/* We found only shared instances of this token if found >0 here */
	KASSERT((found == 0), ("Token %p s/x livelock", tok));
good:
#endif

	/* Fast path: spin briefly for the token before giving up the cpu */
	if (_lwkt_trytokref_spin(ref, td, TOK_EXCLUSIVE|TOK_EXCLREQ))
		return;

	/*
	 * Give up running if we can't acquire the token right now.
	 *
	 * Since the tokref is already active the scheduler now
	 * takes care of acquisition, so we need only call
	 * lwkt_switch().
	 *
	 * Since we failed this was not a recursive token so upon
	 * return tr_tok->t_ref should be assigned to this specific
	 * ref.
	 */
	td->td_wmesg = tok->t_desc;
	++tok->t_collisions;
	logtoken(fail, ref);
	td->td_toks_have = td->td_toks_stop - 1;

	/*
	 * Rate-limited contention diagnostics, matching the reporting done
	 * in the shared-token acquisition path (tokens_debug_output knob).
	 */
	if (tokens_debug_output > 0) {
		--tokens_debug_output;
		spin_lock(&tok_debug_spin);
		kprintf("Excl Token thread %p %s %s\n",
			td, tok->t_desc, td->td_comm);
		print_backtrace(6);
		kprintf("\n");
		spin_unlock(&tok_debug_spin);
	}

	/* Block; the scheduler acquires the token before resuming us */
	lwkt_switch();
	logtoken(succ, ref);
	KKASSERT(tok->t_ref == ref);
}
/*
 * Attempt to acquire a token without blocking.  Returns TRUE on success,
 * FALSE on failure.
 *
 * The tokref is initialized with TOK_EXCLREQ set so that, should we end
 * up keeping the token across a switch, acquisition becomes mandatory.
 * The actual try is made without TOK_EXCLREQ so a failure can simply be
 * unwound.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t curtd = curthread;
	lwkt_tokref_t tref;

	tref = curtd->td_toks_stop;
	KKASSERT(tref < &curtd->td_toks_end);
	++curtd->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(tref, tok, curtd, TOK_EXCLUSIVE|TOK_EXCLREQ);

	if (_lwkt_trytokref(tref, curtd, TOK_EXCLUSIVE) == 0) {
		/*
		 * Failed; unwind the pending tokref and account for the
		 * collision.
		 */
		cpu_ccfence();
		--curtd->td_toks_stop;
		++tok->t_collisions;
		return FALSE;
	}
	return TRUE;
}