static int
fticket_ctor(void *mem, int size, void *arg, int flags)
{
	struct fuse_ticket *ftick = mem;
	struct fuse_data *data = arg;

	debug_printf("ftick=%p data=%p\n", ftick, data);

	FUSE_ASSERT_MS_DONE(ftick);
	FUSE_ASSERT_AW_DONE(ftick);

	ftick->tk_data = data;

	/* A nonzero unique means this ticket is being recycled; reset it */
	if (ftick->tk_unique != 0)
		fticket_refresh(ftick);

	/* May be truncated to 32 bits */
	ftick->tk_unique = atomic_fetchadd_long(&data->ticketer, 1);
	/* unique 0 is reserved; allocate again if the counter wrapped */
	if (ftick->tk_unique == 0)
		ftick->tk_unique = atomic_fetchadd_long(&data->ticketer, 1);

	refcount_init(&ftick->tk_refcount, 1);
	atomic_add_acq_int(&fuse_ticket_count, 1);

	return 0;
}
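/*
 * Illustration (not part of the driver): the constructor above allocates
 * ticket IDs from a shared counter and treats 0 as a reserved "no ticket"
 * value, re-fetching once if the counter wraps around to 0.  Below is a
 * minimal standalone sketch of that pattern using C11 atomics in place of
 * the kernel atomic_fetchadd_long(); the names g_ticketer and alloc_unique
 * are hypothetical.
 */
#include <stdatomic.h>

static _Atomic unsigned long g_ticketer;	/* hypothetical shared counter */

static unsigned long
alloc_unique(void)
{
	/* atomic_fetch_add returns the pre-increment value, just as
	 * atomic_fetchadd_long() does in the kernel code above. */
	unsigned long id = atomic_fetch_add(&g_ticketer, 1);

	/* 0 is reserved; skip it when the counter wraps around */
	if (id == 0)
		id = atomic_fetch_add(&g_ticketer, 1);
	return id;
}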
/*
 * Attempt to acquire a shared or exclusive token.  Returns TRUE on success,
 * FALSE on failure.
 *
 * If TOK_EXCLUSIVE is set in mode we are attempting to get an exclusive
 * token, otherwise we are attempting to get a shared token.
 *
 * If TOK_EXCLREQ is set in mode this is a blocking operation, otherwise
 * it is a non-blocking operation (for both exclusive and shared
 * acquisitions).
 */
static __inline int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
{
	lwkt_token_t tok;
	lwkt_tokref_t oref;
	long count;

	tok = ref->tr_tok;
	KASSERT(((mode & TOK_EXCLREQ) == 0 ||	/* non blocking */
		td->td_gd->gd_intr_nesting_level == 0 ||
		panic_cpu_gd == mycpu),
		("Attempt to acquire token %p not already "
		"held in hard code section", tok));

	if (mode & TOK_EXCLUSIVE) {
		/*
		 * Attempt to get an exclusive token
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & ~TOK_EXCLREQ) == 0) {
				/*
				 * It is possible to get the exclusive bit.
				 * We must clear TOK_EXCLREQ on successful
				 * acquisition.
				 */
				if (atomic_cmpset_long(&tok->t_count, count,
						       (count & ~TOK_EXCLREQ) |
						       TOK_EXCLUSIVE)) {
					KKASSERT(tok->t_ref == NULL);
					tok->t_ref = ref;
					return TRUE;
				}
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * Our thread already holds the exclusive
				 * bit, we treat this tokref as a shared
				 * token (sorta) to make the token release
				 * code easier.
				 *
				 * NOTE: oref cannot race above if it
				 *	 happens to be ours, so we're good.
				 *	 But we must still have a stable
				 *	 variable for both parts of the
				 *	 comparison.
				 *
				 * NOTE: Since we already have an exclusive
				 *	 lock and don't need to check EXCLREQ
				 *	 we can just use an atomic_add here
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				ref->tr_count &= ~TOK_EXCLUSIVE;
				return TRUE;
			} else if ((mode & TOK_EXCLREQ) &&
				   (count & TOK_EXCLREQ) == 0) {
				/*
				 * Unable to get the exclusive bit but being
				 * asked to set the exclusive-request bit.
				 * Since we are going to retry anyway just
				 * set the bit unconditionally.
				 */
				atomic_set_long(&tok->t_count, TOK_EXCLREQ);
				return FALSE;
			} else {
				/*
				 * Unable to get the exclusive bit and not
				 * being asked to set the exclusive-request
				 * (aka lwkt_trytoken()), or EXCLREQ was
				 * already set.
				 */
				cpu_pause();
				return FALSE;
			}
			/* retry */
		}
	} else {
		/*
		 * Attempt to get a shared token.  Note that TOK_EXCLREQ
		 * for shared tokens simply means the caller intends to
		 * block.  We never actually set the bit in tok->t_count.
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & (TOK_EXCLUSIVE/*|TOK_EXCLREQ*/)) == 0) {
				/*
				 * It may be possible to get the token shared.
				 */
				if ((atomic_fetchadd_long(&tok->t_count,
							  TOK_INCR) &
				     TOK_EXCLUSIVE) == 0) {
					return TRUE;
				}
				atomic_fetchadd_long(&tok->t_count, -TOK_INCR);
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * We own the exclusive bit on the token so
				 * we can in fact also get it shared.
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				return TRUE;
			} else {
				/*
				 * We failed to get the token shared
				 */
				return FALSE;
			}
			/* retry */
		}
	}
}
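/*
 * Illustration (not from lwkt_token.c): the shared-acquisition branch above
 * uses an optimistic fetch-and-add: bump the shared count first, then check
 * whether an exclusive holder raced in, and back the increment out on
 * failure.  Below is a minimal standalone sketch of that pattern using C11
 * atomics; all names (rwcount_t, try_get_shared, CNT_EXCLUSIVE, CNT_INCR)
 * are hypothetical stand-ins for the token fields and flags used above.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define CNT_EXCLUSIVE	0x1UL	/* exclusive-holder bit (like TOK_EXCLUSIVE) */
#define CNT_INCR	0x2UL	/* one shared reference (like TOK_INCR) */

typedef struct {
	_Atomic unsigned long count;	/* shared refs live above bit 0 */
} rwcount_t;

static bool
try_get_shared(rwcount_t *rc)
{
	/*
	 * Optimistically add a shared reference.  atomic_fetch_add returns
	 * the value *before* the add, so we can test whether an exclusive
	 * holder already owned the count when our increment landed.
	 */
	if ((atomic_fetch_add(&rc->count, CNT_INCR) & CNT_EXCLUSIVE) == 0)
		return true;

	/* An exclusive holder beat us: undo our increment and fail. */
	atomic_fetch_sub(&rc->count, CNT_INCR);
	return false;
}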