Example #1
static inline void enc_pools_wakeup(void)
{
        /* caller must already hold the pool spinlock */
        LASSERT_SPIN_LOCKED(&page_pools.epp_lock);
        LASSERT(page_pools.epp_waitqlen >= 0);

        /* wake every waiter, but only if someone is actually waiting */
        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }
}
Example #2
void
lnet_enq_event_locked (lnet_eq_t *eq, lnet_event_t *ev)
{
        lnet_event_t  *eq_slot;

        /* Allocate the next queue slot */
        ev->sequence = eq->eq_enq_seq++;

        /* size must be a power of 2 to handle sequence # overflow */
        LASSERT (eq->eq_size != 0 &&
                 eq->eq_size == LOWEST_BIT_SET (eq->eq_size));
        eq_slot = eq->eq_events + (ev->sequence & (eq->eq_size - 1));

        /* There is no race since both event consumers and event producers
         * take the LNET_LOCK, so we don't screw around with memory
         * barriers, setting the sequence number last or weird structure
         * layout assertions. */
        *eq_slot = *ev;

        /* Call the callback handler (if any) */
        if (eq->eq_callback != NULL)
                eq->eq_callback (eq_slot);

#ifdef __KERNEL__
        /* Wake anyone waiting in LNetEQPoll() */
        if (cfs_waitq_active(&the_lnet.ln_waitq))
                cfs_waitq_broadcast(&the_lnet.ln_waitq);
#else
# ifndef HAVE_LIBPTHREAD
        /* LNetEQPoll() calls into _the_ LND to wait for action */
# else
        /* Wake anyone waiting in LNetEQPoll() */
        pthread_cond_broadcast(&the_lnet.ln_cond);
# endif
#endif
}
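
The comment above the slot calculation in lnet_enq_event_locked() says the queue size must be a power of two so the sequence counter can overflow safely. Below is a minimal standalone sketch (not LNET code; the names are invented for illustration) of why seq & (size - 1) keeps selecting consecutive slots across an unsigned wrap:

#include <stdio.h>

/* Illustrative only: with a power-of-two ring size, (seq & (size - 1))
 * is equivalent to seq % size even when the unsigned sequence counter
 * overflows, because unsigned wrap-around is modulo 2^32 and any power
 * of two divides 2^32 exactly. */
int main(void)
{
        const unsigned int size = 8;        /* must be a power of 2 */
        unsigned int seq = 0xfffffffeU;     /* two steps from wrapping */
        unsigned int i;

        for (i = 0; i < 4; i++, seq++)
                printf("seq=0x%08x -> slot %u\n", seq, seq & (size - 1));

        /* prints slots 6, 7, 0, 1: nothing is skipped or repeated at
         * the wrap point */
        return 0;
}

That is what the LASSERT comparing eq_size with LOWEST_BIT_SET(eq_size) enforces: for any other size, indexing by seq % eq_size would jump when the counter wraps, because 2^32 is not a multiple of the size.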