/*
 * Finish the current sequence due to disconnect.
 * See mdc_import_event()
 */
void seq_client_flush(struct lu_client_seq *seq)
{
	wait_queue_t link;

	LASSERT(seq != NULL);
	init_waitqueue_entry_current(&link);
	mutex_lock(&seq->lcs_mutex);

	while (seq->lcs_update) {
		add_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&seq->lcs_mutex);

		waitq_wait(&link, TASK_UNINTERRUPTIBLE);

		mutex_lock(&seq->lcs_mutex);
		remove_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_RUNNING);
	}

	fid_zero(&seq->lcs_fid);
	/**
	 * this id should not be used for seq range allocation.
	 * set to -1 for dbg check.
	 */
	seq->lcs_space.lsr_index = -1;

	range_init(&seq->lcs_space);
	mutex_unlock(&seq->lcs_mutex);
}
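/*
 * Minimal sketch of the open-coded wait loop the functions in this
 * section share: register on the wait queue, re-check the condition,
 * drop the lock, sleep in waitq_wait(), then re-take the lock and
 * deregister.  The names example_wait_for(), "waitq", "lock" and
 * "cond" are placeholders for illustration, not symbols from the
 * original code.
 */
static void example_wait_for(wait_queue_head_t *waitq, struct mutex *lock,
			     const int *cond)
{
	wait_queue_t link;

	init_waitqueue_entry_current(&link);
	mutex_lock(lock);
	while (*cond) {
		add_wait_queue(waitq, &link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(lock);

		waitq_wait(&link, TASK_UNINTERRUPTIBLE);

		mutex_lock(lock);
		remove_wait_queue(waitq, &link);
		set_current_state(TASK_RUNNING);
	}
	mutex_unlock(lock);
}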
void libcfs_debug_dumplog(void)
{
	wait_queue_t wait;
	struct task_struct *dumper;
	ENTRY;

	/* we're being careful to ensure that the kernel thread is
	 * able to set our state to running as it exits before we
	 * get to schedule() */
	init_waitqueue_entry_current(&wait);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&debug_ctlwq, &wait);

	dumper = kthread_run(libcfs_debug_dumplog_thread,
			     (void *)(long)current_pid(),
			     "libcfs_debug_dumper");
	if (IS_ERR(dumper))
		printk(KERN_ERR "LustreError: cannot start log dump thread: %ld\n",
		       PTR_ERR(dumper));
	else
		waitq_wait(&wait, TASK_INTERRUPTIBLE);

	/* be sure to teardown if kthread_run() failed */
	remove_wait_queue(&debug_ctlwq, &wait);
	set_current_state(TASK_RUNNING);
}
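/*
 * Assumed counterpart sketch (the real libcfs_debug_dumplog_thread is
 * not shown in this section): per the ordering comment above, the dump
 * thread must wake debug_ctlwq when it finishes, so that the parent,
 * already queued and marked TASK_INTERRUPTIBLE, is set back to running
 * before it reaches waitq_wait().  example_dumplog_thread() is a
 * placeholder name.
 */
static int example_dumplog_thread(void *arg)
{
	/* ... write out the debug log for the pid passed in arg ... */
	wake_up(&debug_ctlwq);		/* releases the waiter above */
	return 0;
}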
void
mta_session(struct mta_relay *relay, struct mta_route *route)
{
	struct mta_session	*s;
	struct timeval		 tv;

	mta_session_init();

	s = xcalloc(1, sizeof *s, "mta_session");
	s->id = generate_uid();
	s->relay = relay;
	s->route = route;
	s->io.sock = -1;

	if (relay->flags & RELAY_SSL && relay->flags & RELAY_AUTH)
		s->flags |= MTA_USE_AUTH;
	if (relay->pki_name)
		s->flags |= MTA_USE_CERT;
	if (relay->flags & RELAY_LMTP)
		s->flags |= MTA_LMTP;
	switch (relay->flags & (RELAY_SSL|RELAY_TLS_OPTIONAL)) {
		case RELAY_SSL:
			s->flags |= MTA_FORCE_ANYSSL;
			s->flags |= MTA_WANT_SECURE;
			break;
		case RELAY_SMTPS:
			s->flags |= MTA_FORCE_SMTPS;
			s->flags |= MTA_WANT_SECURE;
			break;
		case RELAY_STARTTLS:
			s->flags |= MTA_FORCE_TLS;
			s->flags |= MTA_WANT_SECURE;
			break;
		case RELAY_TLS_OPTIONAL:
			/* do not force anything, try tls then smtp */
			break;
		default:
			s->flags |= MTA_FORCE_PLAIN;
	}

	log_debug("debug: mta: %p: spawned for relay %s", s,
	    mta_relay_to_text(relay));
	stat_increment("mta.session", 1);

	if (route->dst->ptrname || route->dst->lastptrquery) {
		/* We want to delay the connection so that we always
		 * notify the relay asynchronously.
		 */
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		evtimer_set(&s->io.ev, mta_start, s);
		evtimer_add(&s->io.ev, &tv);
	} else if (waitq_wait(&route->dst->ptrname, mta_on_ptr, s)) {
		dns_query_ptr(s->id, s->route->dst->sa);
		tree_xset(&wait_ptr, s->id, s);
		s->flags |= MTA_WAIT;
	}
}
static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
			       struct lovsub_object *los, int idx)
{
	struct cl_object	*sub;
	struct lov_layout_raid0	*r0;
	struct lu_site		*site;
	struct lu_site_bkt_data	*bkt;
	wait_queue_t		*waiter;

	r0 = &lov->u.raid0;
	LASSERT(r0->lo_sub[idx] == los);

	sub  = lovsub2cl(los);
	site = sub->co_lu.lo_dev->ld_site;
	bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);

	cl_object_kill(env, sub);
	/* release a reference to the sub-object and ... */
	lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
	cl_object_put(env, sub);

	/* ... wait until it is actually destroyed---sub-object clears its
	 * ->lo_sub[] slot in lovsub_object_fini() */
	if (r0->lo_sub[idx] == los) {
		waiter = &lov_env_info(env)->lti_waiter;
		init_waitqueue_entry_current(waiter);
		add_wait_queue(&bkt->lsb_marche_funebre, waiter);
		set_current_state(TASK_UNINTERRUPTIBLE);
		while (1) {
			/* this wait-queue is signaled at the end of
			 * lu_object_free(). */
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_lock(&r0->lo_sub_lock);
			if (r0->lo_sub[idx] == los) {
				spin_unlock(&r0->lo_sub_lock);
				waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
			} else {
				spin_unlock(&r0->lo_sub_lock);
				set_current_state(TASK_RUNNING);
				break;
			}
		}
		remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
	}
	LASSERT(r0->lo_sub[idx] == NULL);
}
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
			      wait_queue_t *link)
{
	if (seq->lcs_update) {
		add_wait_queue(&seq->lcs_waitq, link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&seq->lcs_mutex);

		waitq_wait(link, TASK_UNINTERRUPTIBLE);

		mutex_lock(&seq->lcs_mutex);
		remove_wait_queue(&seq->lcs_waitq, link);
		set_current_state(TASK_RUNNING);
		return -EAGAIN;
	}
	++seq->lcs_update;
	mutex_unlock(&seq->lcs_mutex);
	return 0;
}
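/*
 * Hypothetical caller sketch (not the actual Lustre call site): retry
 * seq_fid_alloc_prep() until no other thread is mid-update.  On -EAGAIN
 * it returns with lcs_mutex re-acquired, so the loop simply checks
 * again; on 0 it has taken lcs_update and dropped the mutex.  The
 * finish step (clearing lcs_update and waking lcs_waitq) is open-coded
 * here as an assumption.
 */
static void example_fid_alloc(struct lu_client_seq *seq)
{
	wait_queue_t link;

	init_waitqueue_entry_current(&link);
	mutex_lock(&seq->lcs_mutex);
	while (seq_fid_alloc_prep(seq, &link) != 0)
		;	/* -EAGAIN: lcs_mutex is held again, re-check */

	/* ... allocate the new fid without holding lcs_mutex ... */

	mutex_lock(&seq->lcs_mutex);
	seq->lcs_update--;
	wake_up(&seq->lcs_waitq);
	mutex_unlock(&seq->lcs_mutex);
}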
/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
	wait_queue_t waitlink;
	unsigned long this_idle = -1;
	cfs_time_t tick = 0;
	long now;
	int p_idx, g_idx;
	int i;

	LASSERT(desc->bd_iov_count > 0);
	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

	/* resent bulk, enc iov might have been allocated previously */
	if (desc->bd_enc_iov != NULL)
		return 0;

	OBD_ALLOC(desc->bd_enc_iov,
		  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
	if (desc->bd_enc_iov == NULL)
		return -ENOMEM;

	spin_lock(&page_pools.epp_lock);

	page_pools.epp_st_access++;
again:
	if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
		if (tick == 0)
			tick = cfs_time_current();

		now = cfs_time_current_sec();

		page_pools.epp_st_missings++;
		page_pools.epp_pages_short += desc->bd_iov_count;

		if (enc_pools_should_grow(desc->bd_iov_count, now)) {
			page_pools.epp_growing = 1;

			spin_unlock(&page_pools.epp_lock);
			enc_pools_add_pages(page_pools.epp_pages_short / 2);
			spin_lock(&page_pools.epp_lock);

			page_pools.epp_growing = 0;

			enc_pools_wakeup();
		} else {
			if (++page_pools.epp_waitqlen >
			    page_pools.epp_st_max_wqlen)
				page_pools.epp_st_max_wqlen =
						page_pools.epp_waitqlen;

			set_current_state(TASK_UNINTERRUPTIBLE);
			init_waitqueue_entry_current(&waitlink);
			add_wait_queue(&page_pools.epp_waitq, &waitlink);

			spin_unlock(&page_pools.epp_lock);
			waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
			remove_wait_queue(&page_pools.epp_waitq, &waitlink);
			LASSERT(page_pools.epp_waitqlen > 0);
			spin_lock(&page_pools.epp_lock);
			page_pools.epp_waitqlen--;
		}

		LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
		page_pools.epp_pages_short -= desc->bd_iov_count;

		this_idle = 0;
		goto again;
	}

	/* record max wait time */
	if (unlikely(tick != 0)) {
		tick = cfs_time_current() - tick;
		if (tick > page_pools.epp_st_max_wait)
			page_pools.epp_st_max_wait = tick;
	}

	/* proceed with rest of allocation */
	page_pools.epp_free_pages -= desc->bd_iov_count;

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
		desc->bd_enc_iov[i].kiov_page =
					page_pools.epp_pools[p_idx][g_idx];
		page_pools.epp_pools[p_idx][g_idx] = NULL;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
		page_pools.epp_st_lowfree = page_pools.epp_free_pages;

	/*
	 * new idle index = (old * weight + new) / (weight + 1)
	 */
	if (this_idle == -1) {
		this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
			    page_pools.epp_total_pages;
	}
	page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
				   this_idle) /
				  (IDLE_IDX_WEIGHT + 1);

	page_pools.epp_last_access = cfs_time_current_sec();

	spin_unlock(&page_pools.epp_lock);
	return 0;
}