/* wake up any threads sleeping on the encrypt page pool waitq */
static inline void enc_pools_wakeup(void)
{
        LASSERT_SPIN_LOCKED(&page_pools.epp_lock);
        LASSERT(page_pools.epp_waitqlen >= 0);

        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }
}
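/*
 * A minimal sketch of the waiter side that enc_pools_wakeup() pairs with,
 * assuming the legacy libcfs primitives (cfs_waitlink_t, cfs_waitq_add,
 * cfs_waitq_wait, cfs_waitq_del) and an epp_free_pages counter in
 * page_pools.  The helper name and the exact wait condition are
 * illustrative, not the actual sptlrpc implementation.
 */
static void enc_pools_wait_for_pages_sketch(void)
{
        cfs_waitlink_t link;

        cfs_waitlink_init(&link);

        cfs_spin_lock(&page_pools.epp_lock);
        while (page_pools.epp_free_pages == 0) {
                /* register as a waiter so enc_pools_wakeup() broadcasts */
                page_pools.epp_waitqlen++;
                cfs_waitq_add(&page_pools.epp_waitq, &link);
                cfs_set_current_state(CFS_TASK_UNINT);
                cfs_spin_unlock(&page_pools.epp_lock);

                cfs_waitq_wait(&link, CFS_TASK_UNINT);

                cfs_spin_lock(&page_pools.epp_lock);
                cfs_waitq_del(&page_pools.epp_waitq, &link);
                page_pools.epp_waitqlen--;
        }
        cfs_spin_unlock(&page_pools.epp_lock);
}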
/**
 * Companion of qsd_request_enter() dropping lqe_pending_req to 0.
 */
static inline void qsd_request_exit(struct lquota_entry *lqe)
{
        if (lqe->lqe_pending_req != 1) {
                LQUOTA_ERROR(lqe, "lqe_pending_req != 1!!!");
                LBUG();
        }
        lqe->lqe_pending_req--;
        lqe->lqe_pending_rel = 0;
        cfs_waitq_broadcast(&lqe->lqe_waiters);
}
/* flag the scheduler as shutting down and wake any sleeping worker threads */
static void
cfs_wi_sched_shutdown(cfs_wi_sched_t *sched)
{
        cfs_wi_sched_lock(sched);

        LASSERT(cfs_list_empty(&sched->ws_runq));
        LASSERT(cfs_list_empty(&sched->ws_rerunq));

        sched->ws_shuttingdown = 1;

#ifdef __KERNEL__
        cfs_waitq_broadcast(&sched->ws_waitq);
#endif
        cfs_wi_sched_unlock(sched);
}
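/*
 * Illustrative sketch of the worker loop that the broadcast in
 * cfs_wi_sched_shutdown() wakes up: the thread sleeps on ws_waitq when the
 * run queue is empty and rechecks ws_shuttingdown after every wakeup.  The
 * loop structure and function name are assumptions, not the actual
 * cfs_wi_scheduler() body.
 */
static int cfs_wi_worker_sketch(cfs_wi_sched_t *sched)
{
        cfs_waitlink_t wait;

        cfs_waitlink_init(&wait);

        cfs_wi_sched_lock(sched);
        while (!sched->ws_shuttingdown) {
                if (!cfs_list_empty(&sched->ws_runq)) {
                        /* ... dequeue one workitem and run its callback ... */
                        continue;
                }

                /* nothing queued: sleep until new work or shutdown */
                cfs_waitq_add(&sched->ws_waitq, &wait);
                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                cfs_wi_sched_unlock(sched);

                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);

                cfs_wi_sched_lock(sched);
                cfs_waitq_del(&sched->ws_waitq, &wait);
        }
        cfs_wi_sched_unlock(sched);
        return 0;
}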
void
lnet_enq_event_locked (lnet_eq_t *eq, lnet_event_t *ev)
{
        lnet_event_t *eq_slot;

        /* Allocate the next queue slot */
        ev->sequence = eq->eq_enq_seq++;

        /* size must be a power of 2 to handle sequence # overflow */
        LASSERT (eq->eq_size != 0 &&
                 eq->eq_size == LOWEST_BIT_SET (eq->eq_size));
        eq_slot = eq->eq_events + (ev->sequence & (eq->eq_size - 1));

        /* There is no race since both event consumers and event producers
         * take the LNET_LOCK, so we don't screw around with memory
         * barriers, setting the sequence number last or weird structure
         * layout assertions. */
        *eq_slot = *ev;

        /* Call the callback handler (if any) */
        if (eq->eq_callback != NULL)
                eq->eq_callback (eq_slot);

#ifdef __KERNEL__
        /* Wake anyone waiting in LNetEQPoll() */
        if (cfs_waitq_active(&the_lnet.ln_waitq))
                cfs_waitq_broadcast(&the_lnet.ln_waitq);
#else
# ifndef HAVE_LIBPTHREAD
        /* LNetEQPoll() calls into _the_ LND to wait for action */
# else
        /* Wake anyone waiting in LNetEQPoll() */
        pthread_cond_broadcast(&the_lnet.ln_cond);
# endif
#endif
}
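/*
 * Minimal sketch of the matching consumer in LNetEQPoll()/LNetEQGet(),
 * assuming the EQ keeps a dequeue sequence counter (called eq_deq_seq here)
 * and that the caller holds LNET_LOCK, just like the producer above.
 * Because eq_size is a power of two, the same masking maps a sequence
 * number onto its slot, and comparing sequence numbers distinguishes
 * "no new event" from events overwritten by a producer that lapped the
 * consumer.  This illustrates the scheme, it is not the verbatim LNet code.
 */
static int lnet_eq_dequeue_sketch(lnet_eq_t *eq, lnet_event_t *ev)
{
        lnet_event_t *slot = eq->eq_events +
                             (eq->eq_deq_seq & (eq->eq_size - 1));

        /* producer has not written this slot yet: queue is empty */
        if ((long)(slot->sequence - eq->eq_deq_seq) < 0)
                return 0;

        *ev = *slot;

        if (slot->sequence == eq->eq_deq_seq) {
                /* exactly the event we expected next */
                eq->eq_deq_seq = slot->sequence + 1;
                return 1;
        }

        /* producer wrapped around: intermediate events were overwritten */
        eq->eq_deq_seq = slot->sequence + 1;
        return -EOVERFLOW;
}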