/*
 * Finish the current sequence due to disconnect.
 * See mdc_import_event()
 */
void seq_client_flush(struct lu_client_seq *seq)
{
        cfs_waitlink_t link;

        LASSERT(seq != NULL);
        cfs_waitlink_init(&link);
        mutex_lock(&seq->lcs_mutex);

        while (seq->lcs_update) {
                cfs_waitq_add(&seq->lcs_waitq, &link);
                cfs_set_current_state(CFS_TASK_UNINT);
                mutex_unlock(&seq->lcs_mutex);

                cfs_waitq_wait(&link, CFS_TASK_UNINT);

                mutex_lock(&seq->lcs_mutex);
                cfs_waitq_del(&seq->lcs_waitq, &link);
                cfs_set_current_state(CFS_TASK_RUNNING);
        }

        fid_zero(&seq->lcs_fid);
        /**
         * This id should not be used for seq range allocation.
         * Set to -1 for a debug check.
         */
        seq->lcs_space.lsr_index = -1;

        range_init(&seq->lcs_space);
        mutex_unlock(&seq->lcs_mutex);
}
void libcfs_debug_dumplog(void)
{
        cfs_waitlink_t wait;
        cfs_task_t    *dumper;
        ENTRY;

        /* we're being careful to ensure that the kernel thread is
         * able to set our state to running as it exits before we
         * get to schedule() */
        cfs_waitlink_init(&wait);
        cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
        cfs_waitq_add(&debug_ctlwq, &wait);

        dumper = cfs_kthread_run(libcfs_debug_dumplog_thread,
                                 (void *)(long)cfs_curproc_pid(),
                                 "libcfs_debug_dumper");
        if (IS_ERR(dumper))
                printk(CFS_KERN_ERR "LustreError: cannot start log dump thread:"
                       " %ld\n", PTR_ERR(dumper));
        else
                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);

        /* be sure to tear down if cfs_kthread_run() failed */
        cfs_waitq_del(&debug_ctlwq, &wait);
        cfs_set_current_state(CFS_TASK_RUNNING);
}
/* Allocate new fid on passed client @seq and save it to @fid. */
int seq_client_alloc_fid(const struct lu_env *env,
                         struct lu_client_seq *seq, struct lu_fid *fid)
{
        cfs_waitlink_t link;
        int rc;
        ENTRY;

        LASSERT(seq != NULL);
        LASSERT(fid != NULL);

        cfs_waitlink_init(&link);
        cfs_mutex_lock(&seq->lcs_mutex);

        while (1) {
                seqno_t seqnr;

                if (!fid_is_zero(&seq->lcs_fid) &&
                    fid_oid(&seq->lcs_fid) < seq->lcs_width) {
                        /* Just bump last allocated fid and return to caller. */
                        seq->lcs_fid.f_oid += 1;
                        rc = 0;
                        break;
                }

                rc = seq_fid_alloc_prep(seq, &link);
                if (rc)
                        continue;

                rc = seq_client_alloc_seq(env, seq, &seqnr);
                if (rc) {
                        CERROR("%s: Can't allocate new sequence, "
                               "rc %d\n", seq->lcs_name, rc);
                        seq_fid_alloc_fini(seq);
                        cfs_mutex_unlock(&seq->lcs_mutex);
                        RETURN(rc);
                }

                CDEBUG(D_INFO, "%s: Switch to sequence "
                       "[0x%16.16"LPF64"x]\n", seq->lcs_name, seqnr);

                seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
                seq->lcs_fid.f_seq = seqnr;
                seq->lcs_fid.f_ver = 0;

                /*
                 * Inform caller that sequence switch is performed to allow it
                 * to setup FLD for it.
                 */
                rc = 1;

                seq_fid_alloc_fini(seq);
                break;
        }

        *fid = seq->lcs_fid;
        cfs_mutex_unlock(&seq->lcs_mutex);

        CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
        RETURN(rc);
}
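/*
 * A minimal caller sketch, assuming only the return-value convention visible
 * above: 0 for a FID taken from the current sequence, 1 when a sequence switch
 * happened (the caller is then expected to set up FLD for the new sequence),
 * and a negative value on error.  The wrapper name below is hypothetical and
 * the FLD registration step is only indicated by a comment.
 */
static int example_alloc_one_fid(const struct lu_env *env,
                                 struct lu_client_seq *seq,
                                 struct lu_fid *fid)
{
        int rc;

        rc = seq_client_alloc_fid(env, seq, fid);
        if (rc < 0)
                return rc;              /* allocation failed */

        if (rc == 1) {
                /* A sequence switch was performed: register the new
                 * sequence with FLD here before using the FID (omitted). */
        }

        return 0;
}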
/**
 * Allocate the whole seq to the caller.
 **/
int seq_client_get_seq(const struct lu_env *env,
                       struct lu_client_seq *seq, seqno_t *seqnr)
{
        cfs_waitlink_t link;
        int rc;

        LASSERT(seqnr != NULL);
        mutex_lock(&seq->lcs_mutex);

        cfs_waitlink_init(&link);

        while (1) {
                rc = seq_fid_alloc_prep(seq, &link);
                if (rc == 0)
                        break;
        }

        rc = seq_client_alloc_seq(env, seq, seqnr);
        if (rc) {
                CERROR("%s: Can't allocate new sequence, "
                       "rc %d\n", seq->lcs_name, rc);
                seq_fid_alloc_fini(seq);
                mutex_unlock(&seq->lcs_mutex);
                return rc;
        }

        CDEBUG(D_INFO, "%s: allocate sequence "
               "[0x%16.16"LPF64"x]\n", seq->lcs_name, *seqnr);

        /* Since the caller requires the whole sequence,
         * mark this sequence as fully used. */
        if (seq->lcs_type == LUSTRE_SEQ_METADATA)
                seq->lcs_fid.f_oid = LUSTRE_METADATA_SEQ_MAX_WIDTH;
        else
                seq->lcs_fid.f_oid = LUSTRE_DATA_SEQ_MAX_WIDTH;

        seq->lcs_fid.f_seq = *seqnr;
        seq->lcs_fid.f_ver = 0;

        /*
         * Inform caller that sequence switch is performed to allow it
         * to setup FLD for it.
         */
        seq_fid_alloc_fini(seq);
        mutex_unlock(&seq->lcs_mutex);

        return rc;
}
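/*
 * A minimal sketch of reserving an entire sequence with seq_client_get_seq().
 * The wrapper name is hypothetical; error handling and the subsequent FLD
 * setup are only indicated by comments, as they depend on the caller.
 */
static int example_reserve_sequence(const struct lu_env *env,
                                    struct lu_client_seq *seq,
                                    seqno_t *seqnr)
{
        int rc;

        rc = seq_client_get_seq(env, seq, seqnr);
        if (rc != 0)
                return rc;      /* could not allocate a new sequence */

        /* The whole sequence *seqnr now belongs to this caller; as with
         * seq_client_alloc_fid(), FLD setup for it is the caller's job. */
        return 0;
}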
static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
                               struct lovsub_object *los, int idx)
{
        struct cl_object        *sub;
        struct lov_layout_raid0 *r0;
        struct lu_site          *site;
        struct lu_site_bkt_data *bkt;
        cfs_waitlink_t          *waiter;

        r0  = &lov->u.raid0;
        LASSERT(r0->lo_sub[idx] == los);

        sub  = lovsub2cl(los);
        site = sub->co_lu.lo_dev->ld_site;
        bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);

        cl_object_kill(env, sub);
        /* release a reference to the sub-object and ... */
        lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
        cl_object_put(env, sub);

        /* ... wait until it is actually destroyed---sub-object clears its
         * ->lo_sub[] slot in lovsub_object_fini() */
        if (r0->lo_sub[idx] == los) {
                waiter = &lov_env_info(env)->lti_waiter;
                cfs_waitlink_init(waiter);
                cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
                cfs_set_current_state(CFS_TASK_UNINT);
                while (1) {
                        /* this wait-queue is signaled at the end of
                         * lu_object_free(). */
                        cfs_set_current_state(CFS_TASK_UNINT);
                        spin_lock(&r0->lo_sub_lock);
                        if (r0->lo_sub[idx] == los) {
                                spin_unlock(&r0->lo_sub_lock);
                                cfs_waitq_wait(waiter, CFS_TASK_UNINT);
                        } else {
                                spin_unlock(&r0->lo_sub_lock);
                                cfs_set_current_state(CFS_TASK_RUNNING);
                                break;
                        }
                }
                cfs_waitq_del(&bkt->lsb_marche_funebre, waiter);
        }
        LASSERT(r0->lo_sub[idx] == NULL);
}
/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t  waitlink;
        unsigned long   this_idle = -1;
        cfs_time_t      tick = 0;
        long            now;
        int             p_idx, g_idx;
        int             i;

        LASSERT(desc->bd_iov_count > 0);
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

        /* resent bulk, enc iov might have been allocated previously */
        if (desc->bd_enc_iov != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_iov,
                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        if (desc->bd_enc_iov == NULL)
                return -ENOMEM;

        cfs_spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_iov_count;

                if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                        page_pools.epp_growing = 1;

                        cfs_spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        cfs_spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;

                        enc_pools_wakeup();
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                        page_pools.epp_waitqlen;

                        cfs_set_current_state(CFS_TASK_UNINT);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        cfs_spin_unlock(&page_pools.epp_lock);
                        cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
                        LASSERT(page_pools.epp_waitqlen > 0);
                        cfs_spin_lock(&page_pools.epp_lock);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
                page_pools.epp_pages_short -= desc->bd_iov_count;

                this_idle = 0;
                goto again;
        }

        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with rest of allocation */
        page_pools.epp_free_pages -= desc->bd_iov_count;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_iov[i].kiov_page =
                        page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }

        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);

        page_pools.epp_last_access = cfs_time_current_sec();

        cfs_spin_unlock(&page_pools.epp_lock);
        return 0;
}
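/*
 * A minimal sketch of the expected calling pattern, assuming a bulk
 * descriptor has already been prepared.  The matching release helper
 * (sptlrpc_enc_pool_put_pages()) is an assumption based on the pool's
 * symmetry and is not defined in this excerpt; the wrapper name is
 * hypothetical.
 */
static int example_use_enc_pool(struct ptlrpc_bulk_desc *desc)
{
        int rc;

        rc = sptlrpc_enc_pool_get_pages(desc);  /* may block until pages free up */
        if (rc != 0)
                return rc;                      /* -ENOMEM if bd_enc_iov can't be allocated */

        /* ... encrypt into desc->bd_enc_iov[i].kiov_page and send the bulk ... */

        sptlrpc_enc_pool_put_pages(desc);       /* assumed release helper */
        return 0;
}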
int
LNetEQPoll (lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
            lnet_event_t *event, int *which)
{
        int              i;
        int              rc;
#ifdef __KERNEL__
        cfs_waitlink_t   wl;
        cfs_time_t       now;
#else
        struct timeval   then;
        struct timeval   now;
# ifdef HAVE_LIBPTHREAD
        struct timespec  ts;
# endif
        lnet_ni_t       *eqwaitni = the_lnet.ln_eqwaitni;
#endif
        ENTRY;

        LASSERT (the_lnet.ln_init);
        LASSERT (the_lnet.ln_refcount > 0);

        if (neq < 1)
                RETURN(-ENOENT);

        LNET_LOCK();

        for (;;) {
#ifndef __KERNEL__
                LNET_UNLOCK();

                /* Recursion breaker */
                if (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
                    !LNetHandleIsEqual(eventqs[0], the_lnet.ln_rc_eqh))
                        lnet_router_checker();

                LNET_LOCK();
#endif
                for (i = 0; i < neq; i++) {
                        lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);

                        if (eq == NULL) {
                                LNET_UNLOCK();
                                RETURN(-ENOENT);
                        }

                        rc = lib_get_event (eq, event);
                        if (rc != 0) {
                                LNET_UNLOCK();
                                *which = i;
                                RETURN(rc);
                        }
                }

#ifdef __KERNEL__
                if (timeout_ms == 0) {
                        LNET_UNLOCK();
                        RETURN (0);
                }

                cfs_waitlink_init(&wl);
                set_current_state(TASK_INTERRUPTIBLE);
                cfs_waitq_add(&the_lnet.ln_waitq, &wl);

                LNET_UNLOCK();

                if (timeout_ms < 0) {
                        cfs_waitq_wait (&wl, CFS_TASK_INTERRUPTIBLE);
                } else {
                        struct timeval tv;

                        now = cfs_time_current();
                        cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
                                            cfs_time_seconds(timeout_ms)/1000);
                        cfs_duration_usec(cfs_time_sub(cfs_time_current(), now),
                                          &tv);
                        timeout_ms -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
                        if (timeout_ms < 0)
                                timeout_ms = 0;
                }

                LNET_LOCK();
                cfs_waitq_del(&the_lnet.ln_waitq, &wl);
#else
                if (eqwaitni != NULL) {
                        /* I have a single NI that I have to call into, to get
                         * events queued, or to block. */
                        lnet_ni_addref_locked(eqwaitni);
                        LNET_UNLOCK();

                        if (timeout_ms <= 0) {
                                (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);
                        } else {
                                gettimeofday(&then, NULL);

                                (eqwaitni->ni_lnd->lnd_wait)(eqwaitni, timeout_ms);

                                gettimeofday(&now, NULL);
                                timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
                                              (now.tv_usec - then.tv_usec) / 1000;
                                if (timeout_ms < 0)
                                        timeout_ms = 0;
                        }

                        LNET_LOCK();
                        lnet_ni_decref_locked(eqwaitni);

                        /* don't call into eqwaitni again if timeout has
                         * expired */
                        if (timeout_ms == 0)
                                eqwaitni = NULL;

                        continue;               /* go back and check for events */
                }

                if (timeout_ms == 0) {
                        LNET_UNLOCK();
                        RETURN (0);
                }

# ifndef HAVE_LIBPTHREAD
                /* If I'm single-threaded, LNET fails at startup if it can't
                 * set the_lnet.ln_eqwaitni correctly. */
                LBUG();
# else
                if (timeout_ms < 0) {
                        pthread_cond_wait(&the_lnet.ln_cond,
                                          &the_lnet.ln_lock);
                } else {
                        gettimeofday(&then, NULL);

                        ts.tv_sec = then.tv_sec + timeout_ms/1000;
                        ts.tv_nsec = then.tv_usec * 1000 +
                                     (timeout_ms%1000) * 1000000;
                        if (ts.tv_nsec >= 1000000000) {
                                ts.tv_sec++;
                                ts.tv_nsec -= 1000000000;
                        }

                        pthread_cond_timedwait(&the_lnet.ln_cond,
                                               &the_lnet.ln_lock, &ts);

                        gettimeofday(&now, NULL);
                        timeout_ms -= (now.tv_sec - then.tv_sec) * 1000 +
                                      (now.tv_usec - then.tv_usec) / 1000;

                        if (timeout_ms < 0)
                                timeout_ms = 0;
                }
# endif
#endif
        }
}
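/*
 * A minimal usage sketch based only on the signature and return paths
 * visible above: 0 on timeout, -ENOENT for a bad handle, otherwise the
 * lib_get_event() result with *which identifying which EQ fired.  The EQ
 * handle is assumed to have been created elsewhere; the function name below
 * is hypothetical.
 */
static void example_drain_eq(lnet_handle_eq_t eqh)
{
        lnet_event_t event;
        int          which;
        int          rc;

        for (;;) {
                /* Poll a single EQ, waiting up to one second per iteration. */
                rc = LNetEQPoll(&eqh, 1, 1000, &event, &which);
                if (rc == 0)
                        break;          /* timed out, no event pending */
                if (rc < 0)
                        break;          /* invalid handle or other error */

                /* 'event' is valid here and 'which' is the index (0) of the
                 * EQ that produced it; handle the event as needed. */
        }
}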