/*
 * Finish the current sequence due to disconnect.
 * See mdc_import_event().
 */
void seq_client_flush(struct lu_client_seq *seq)
{
        cfs_waitlink_t link;

        LASSERT(seq != NULL);
        cfs_waitlink_init(&link);
        mutex_lock(&seq->lcs_mutex);

        while (seq->lcs_update) {
                cfs_waitq_add(&seq->lcs_waitq, &link);
                cfs_set_current_state(CFS_TASK_UNINT);
                mutex_unlock(&seq->lcs_mutex);

                cfs_waitq_wait(&link, CFS_TASK_UNINT);

                mutex_lock(&seq->lcs_mutex);
                cfs_waitq_del(&seq->lcs_waitq, &link);
                cfs_set_current_state(CFS_TASK_RUNNING);
        }

        fid_zero(&seq->lcs_fid);
        /**
         * This index should not be used for sequence range allocation;
         * set it to -1 for debug checks.
         */
        seq->lcs_space.lsr_index = -1;

        range_init(&seq->lcs_space);
        mutex_unlock(&seq->lcs_mutex);
}
void libcfs_debug_dumplog(void)
{
        cfs_waitlink_t wait;
        cfs_task_t    *dumper;
        ENTRY;

        /* We're being careful to ensure that the kernel thread is able
         * to set our state to running as it exits before we get to
         * schedule(). */
        cfs_waitlink_init(&wait);
        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
        cfs_waitq_add(&debug_ctlwq, &wait);

        dumper = cfs_kthread_run(libcfs_debug_dumplog_thread,
                                 (void *)(long)cfs_curproc_pid(),
                                 "libcfs_debug_dumper");
        if (IS_ERR(dumper))
                printk(CFS_KERN_ERR "LustreError: cannot start log dump "
                       "thread: %ld\n", PTR_ERR(dumper));
        else
                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);

        /* Be sure to tear down the wait link even if starting the dump
         * thread failed. */
        cfs_waitq_del(&debug_ctlwq, &wait);
        cfs_set_current_state(CFS_TASK_RUNNING);
}
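/*
 * For context, the dump thread that libcfs_debug_dumplog() sleeps on is
 * roughly the following (a paraphrase, not the verbatim source): it
 * dumps the debug log for the requesting pid, then signals debug_ctlwq
 * so the waiter above is moved back to CFS_TASK_RUNNING.
 */
static int libcfs_debug_dumplog_thread(void *arg)
{
        libcfs_debug_dumplog_internal(arg);
        cfs_waitq_signal(&debug_ctlwq);
        return 0;
}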
static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
                               struct lovsub_object *los, int idx)
{
        struct cl_object        *sub;
        struct lov_layout_raid0 *r0;
        struct lu_site          *site;
        struct lu_site_bkt_data *bkt;
        cfs_waitlink_t          *waiter;

        r0 = &lov->u.raid0;
        LASSERT(r0->lo_sub[idx] == los);

        sub  = lovsub2cl(los);
        site = sub->co_lu.lo_dev->ld_site;
        bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);

        cl_object_kill(env, sub);
        /* release a reference to the sub-object and ... */
        lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
        cl_object_put(env, sub);

        /* ... wait until it is actually destroyed---sub-object clears its
         * ->lo_sub[] slot in lovsub_object_fini() */
        if (r0->lo_sub[idx] == los) {
                waiter = &lov_env_info(env)->lti_waiter;
                cfs_waitlink_init(waiter);
                cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
                cfs_set_current_state(CFS_TASK_UNINT);
                while (1) {
                        /* this wait-queue is signaled at the end of
                         * lu_object_free(). */
                        cfs_set_current_state(CFS_TASK_UNINT);
                        spin_lock(&r0->lo_sub_lock);
                        if (r0->lo_sub[idx] == los) {
                                spin_unlock(&r0->lo_sub_lock);
                                cfs_waitq_wait(waiter, CFS_TASK_UNINT);
                        } else {
                                spin_unlock(&r0->lo_sub_lock);
                                cfs_set_current_state(CFS_TASK_RUNNING);
                                break;
                        }
                }
                cfs_waitq_del(&bkt->lsb_marche_funebre, waiter);
        }
        LASSERT(r0->lo_sub[idx] == NULL);
}
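/*
 * For reference, the wake-up side mentioned in the comment above is
 * paraphrased below; this is a sketch of what happens at the end of
 * lu_object_free(), not the verbatim source.  Once the object's layers
 * are torn down (lovsub_object_fini() having cleared r0->lo_sub[idx]
 * along the way), the site bucket's queue is woken so that
 * lov_subobject_kill() can re-check its slot.
 */
static void lu_object_free_tail_sketch(struct lu_site_bkt_data *bkt)
{
        /* ... per-layer ->loo_object_free() calls happen first ... */
        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}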
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
                              cfs_waitlink_t *link)
{
        if (seq->lcs_update) {
                cfs_waitq_add(&seq->lcs_waitq, link);
                cfs_set_current_state(CFS_TASK_UNINT);
                mutex_unlock(&seq->lcs_mutex);

                cfs_waitq_wait(link, CFS_TASK_UNINT);

                mutex_lock(&seq->lcs_mutex);
                cfs_waitq_del(&seq->lcs_waitq, link);
                cfs_set_current_state(CFS_TASK_RUNNING);
                return -EAGAIN;
        }

        ++seq->lcs_update;
        mutex_unlock(&seq->lcs_mutex);
        return 0;
}
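/*
 * Hedged sketch of how seq_fid_alloc_prep() is meant to be driven; the
 * RPC placeholder and the finish step are reconstructed from the
 * locking protocol visible above, not copied verbatim.  -EAGAIN means
 * "another allocation was in flight, we slept and now hold lcs_mutex
 * again", so the caller simply re-checks; 0 means lcs_update has been
 * raised and lcs_mutex dropped for the duration of the allocation.
 */
cfs_waitlink_t link;

cfs_waitlink_init(&link);
mutex_lock(&seq->lcs_mutex);

while (seq_fid_alloc_prep(seq, &link) != 0)
        ;       /* lcs_mutex re-held here; loop re-checks lcs_update */

/* perform the actual sequence allocation RPC without lcs_mutex held */

mutex_lock(&seq->lcs_mutex);
--seq->lcs_update;                      /* allocation finished */
cfs_waitq_signal(&seq->lcs_waitq);      /* wake seq_client_flush() et al. */
mutex_unlock(&seq->lcs_mutex);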
int __obd_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
        int ret;

        ret = __obd_fail_check_set(id, value, set);
        if (ret) {
                CERROR("obd_fail_timeout id %x sleeping for %dms\n",
                       id, ms);
                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
                                                   cfs_time_seconds(ms) / 1000);
                cfs_set_current_state(CFS_TASK_RUNNING);
                CERROR("obd_fail_timeout id %x awake\n", id);
        }
        return ret;
}
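/*
 * Hypothetical call site (the id 0x123 and the surrounding code are
 * invented for illustration): if a test script has armed the matching
 * fail_loc, stall this code path for 2000 ms before continuing;
 * otherwise the check is a cheap no-op and the return value stays 0.
 */
if (__obd_fail_timeout_set(0x123, 0, 2000, 0))
        CDEBUG(D_INFO, "fail point 0x123 fired, slept 2000 ms\n");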
/*
 * We allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t waitlink;
        unsigned long  this_idle = -1;
        cfs_time_t     tick = 0;
        long           now;
        int            p_idx, g_idx;
        int            i;

        LASSERT(desc->bd_iov_count > 0);
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

        /* resent bulk, enc iov might have been allocated previously */
        if (desc->bd_enc_iov != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_iov,
                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        if (desc->bd_enc_iov == NULL)
                return -ENOMEM;

        cfs_spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_iov_count;

                if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                        page_pools.epp_growing = 1;

                        cfs_spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        cfs_spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;

                        enc_pools_wakeup();
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                                page_pools.epp_waitqlen;

                        cfs_set_current_state(CFS_TASK_UNINT);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        cfs_spin_unlock(&page_pools.epp_lock);
                        cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
                        LASSERT(page_pools.epp_waitqlen > 0);
                        cfs_spin_lock(&page_pools.epp_lock);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
                page_pools.epp_pages_short -= desc->bd_iov_count;

                this_idle = 0;
                goto again;
        }

        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with rest of allocation */
        page_pools.epp_free_pages -= desc->bd_iov_count;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_iov[i].kiov_page =
                                        page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }
        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);

        page_pools.epp_last_access = cfs_time_current_sec();

        cfs_spin_unlock(&page_pools.epp_lock);
        return 0;
}
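/*
 * Standalone illustration of the idle-index smoothing used above; a
 * userspace toy, where the IDLE_IDX_MAX and IDLE_IDX_WEIGHT values are
 * assumed for the example, not taken from the pool code.  It shows how
 * the weighted average new = (old * weight + sample) / (weight + 1)
 * damps sudden swings in the instantaneous idle ratio.
 */
#include <stdio.h>

#define IDLE_IDX_MAX    100     /* assumed scale: 100 == fully idle */
#define IDLE_IDX_WEIGHT 3       /* assumed smoothing weight */

int main(void)
{
        unsigned long idle_idx = IDLE_IDX_MAX;  /* start fully idle */
        unsigned long samples[] = { 100, 0, 0, 50, 100 };
        int i;

        for (i = 0; i < 5; i++) {
                idle_idx = (idle_idx * IDLE_IDX_WEIGHT + samples[i]) /
                           (IDLE_IDX_WEIGHT + 1);
                printf("sample=%3lu -> idle_idx=%3lu\n",
                       samples[i], idle_idx);
        }
        return 0;
}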