int cfs_timer_is_armed(cfs_timer_t *l)
{
        if (cfs_time_before(cfs_time_current(), l->expires))
                return 1;
        else
                return 0;
}
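/*
 * Minimal usage sketch (hypothetical caller, not from the original
 * source): re-arm a timer only when it is no longer armed, using the
 * cfs_timer_arm()/cfs_time_*() primitives seen elsewhere in this code.
 * The 30-second deadline is illustrative.
 */
static void example_rearm_if_expired(cfs_timer_t *t)
{
        if (!cfs_timer_is_armed(t))
                /* push the deadline 30 seconds into the future */
                cfs_timer_arm(t, cfs_time_add(cfs_time_current(),
                                              cfs_time_seconds(30)));
}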
static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
{
        if (cfs_time_before(expiry, ll_capa_timer.expires) ||
            !timer_pending(&ll_capa_timer)) {
                mod_timer(&ll_capa_timer, expiry);
                DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                           "ll_capa_timer update: %lu/%lu by", expiry, jiffies);
        }
}
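/*
 * Illustrative sketch (hypothetical helper, not from the original
 * source): because update_capa_timer() only moves ll_capa_timer earlier,
 * or arms it when idle, queueing several capabilities leaves the shared
 * timer set to the earliest deadline. The deadlines below are made up.
 */
static void example_update_two_capas(struct obd_capa *a, struct obd_capa *b)
{
        update_capa_timer(a, cfs_time_add(cfs_time_current(),
                                          cfs_time_seconds(60)));
        /* this earlier deadline wins: the timer is re-armed to it */
        update_capa_timer(b, cfs_time_add(cfs_time_current(),
                                          cfs_time_seconds(10)));
}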
/*
 * statfs
 */
static inline int osp_statfs_need_update(struct osp_device *d)
{
        return !cfs_time_before(cfs_time_current(),
                                d->opd_statfs_fresh_till);
}
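/*
 * Sketch of a caller, under assumptions (the helper and the 5-second
 * freshness window are illustrative, not the original OSP code): when
 * the cached statfs data has gone stale, refresh it and push
 * opd_statfs_fresh_till forward so subsequent checks stay cheap.
 */
static void example_maybe_refresh_statfs(struct osp_device *d)
{
        if (!osp_statfs_need_update(d))
                return;
        /* ... re-fetch statfs from the OST here (omitted) ... */
        d->opd_statfs_fresh_till = cfs_time_add(cfs_time_current(),
                                                cfs_time_seconds(5));
}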
int quota_adjust_slave_lqs(struct quota_adjust_qunit *oqaq,
                           struct lustre_quota_ctxt *qctxt)
{
        struct lustre_qunit_size *lqs = NULL;
        unsigned long *unit, *tune;
        signed long tmp = 0;
        cfs_time_t time_limit = 0, *shrink;
        int i, rc = 0;
        ENTRY;

        LASSERT(qctxt);
        lqs = quota_search_lqs(LQS_KEY(QAQ_IS_GRP(oqaq), oqaq->qaq_id),
                               qctxt, QAQ_IS_CREATE_LQS(oqaq) ? 1 : 0);
        if (lqs == NULL || IS_ERR(lqs)) {
                CERROR("failed to find lqs for %sid %u!\n",
                       QAQ_IS_GRP(oqaq) ? "g" : "u", oqaq->qaq_id);
                /* PTR_ERR(NULL) would be 0, i.e. success */
                RETURN(lqs ? PTR_ERR(lqs) : -ENOENT);
        }

        CDEBUG(D_QUOTA, "before: bunit: %lu, iunit: %lu.\n",
               lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);
        cfs_spin_lock(&lqs->lqs_lock);
        for (i = 0; i < 2; i++) {
                if (i == 0 && !QAQ_IS_ADJBLK(oqaq))
                        continue;

                if (i == 1 && !QAQ_IS_ADJINO(oqaq))
                        continue;

                tmp = i ? (lqs->lqs_iunit_sz - oqaq->qaq_iunit_sz) :
                          (lqs->lqs_bunit_sz - oqaq->qaq_bunit_sz);
                shrink = i ? &lqs->lqs_last_ishrink :
                             &lqs->lqs_last_bshrink;
                time_limit = cfs_time_add(i ? lqs->lqs_last_ishrink :
                                              lqs->lqs_last_bshrink,
                                          cfs_time_seconds(
                                                qctxt->lqc_switch_seconds));
                unit = i ? &lqs->lqs_iunit_sz : &lqs->lqs_bunit_sz;
                tune = i ? &lqs->lqs_itune_sz : &lqs->lqs_btune_sz;

                /* quota master shrinks */
                if (qctxt->lqc_handler && tmp > 0)
                        *shrink = cfs_time_current();

                /* quota master enlarges */
                if (qctxt->lqc_handler && tmp < 0) {
                        /* in case of ping-pong effect, don't enlarge lqs
                         * in a short time */
                        if (*shrink &&
                            cfs_time_before(cfs_time_current(), time_limit))
                                tmp = 0;
                }

                /* when setquota, don't enlarge lqs b=18616 */
                if (QAQ_IS_CREATE_LQS(oqaq) && tmp < 0)
                        tmp = 0;

                if (tmp != 0) {
                        *unit = i ? oqaq->qaq_iunit_sz : oqaq->qaq_bunit_sz;
                        *tune = (*unit) / 2;
                }

                if (tmp > 0)
                        rc |= i ? LQS_INO_DECREASE : LQS_BLK_DECREASE;
                if (tmp < 0)
                        rc |= i ? LQS_INO_INCREASE : LQS_BLK_INCREASE;
        }
        cfs_spin_unlock(&lqs->lqs_lock);

        CDEBUG(D_QUOTA, "after: bunit: %lu, iunit: %lu.\n",
               lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);

        lqs_putref(lqs);
        RETURN(rc);
}
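/*
 * Illustration (hypothetical consumer, not from the original source):
 * the return value above is a bitmask of LQS_{BLK,INO}_{INCREASE,
 * DECREASE} flags, so a caller can react to each direction
 * independently.
 */
static void example_consume_adjust_flags(int flags)
{
        if (flags & LQS_BLK_DECREASE)
                CDEBUG(D_QUOTA, "block unit shrank, surplus quota can be "
                       "released to the master\n");
        if (flags & LQS_INO_INCREASE)
                CDEBUG(D_QUOTA, "inode unit grew, larger acquisitions are "
                       "now worthwhile\n");
}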
static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
                                const unsigned int id[], int pending[],
                                int count, quota_acquire acquire,
                                struct obd_trans_info *oti, int isblk,
                                struct inode *inode, int frags)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        struct l_wait_info lwi = { 0 };
        int rc = 0, cycle = 0, count_err = 1;
        ENTRY;

        if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
                RETURN(0);

        if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
                /* If the client has been evicted or if it
                 * timed out and tried to reconnect already,
                 * abort the request immediately */
                RETURN(-ENOTCONN);

        CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
        pending[USRQUOTA] = pending[GRPQUOTA] = 0;
        /* Unfortunately, if the quota master is too busy to handle the
         * pre-dqacq in time and the quota hash on the ost is used up, we
         * have to wait for the completion of in-flight dqacq/dqrel
         * in order to get enough quota for the write b=12588 */
        cfs_gettimeofday(&work_start);
        while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
                                        inode, frags)) & QUOTA_RET_ACQUOTA) {

                cfs_spin_lock(&qctxt->lqc_lock);
                if (!qctxt->lqc_import && oti) {
                        cfs_spin_unlock(&qctxt->lqc_lock);
                        LASSERT(oti && oti->oti_thread &&
                                oti->oti_thread->t_watchdog);
                        lc_watchdog_disable(oti->oti_thread->t_watchdog);
                        CDEBUG(D_QUOTA, "sleep for quota master\n");
                        l_wait_event(qctxt->lqc_wait_for_qmaster,
                                     check_qm(qctxt), &lwi);
                        CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                        lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                } else {
                        cfs_spin_unlock(&qctxt->lqc_lock);
                }

                cycle++;
                if (isblk)
                        OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
                /* after acquire(), we run quota_check_common() again to
                 * confirm there is enough quota to finish the write */
                rc = acquire(obd, id, oti, isblk);

                /* see dqacq_completion() for the cases handled below */
                /* a new request is finished, try again */
                if (rc == QUOTA_REQ_RETURNED) {
                        CDEBUG(D_QUOTA, "finish a quota req, try again\n");
                        continue;
                }

                /* it is out of quota already */
                if (rc == -EDQUOT) {
                        CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
                        break;
                }

                /* Related quota has been disabled by master, but enabled by
                 * slave, do not try again. */
                if (unlikely(rc == -ESRCH)) {
                        CERROR("mismatched quota configuration, "
                               "stop trying.\n");
                        break;
                }

                if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
                        /* The client has been evicted or tried to
                         * reconnect already, abort the request */
                        RETURN(-ENOTCONN);

                /* -EBUSY and others, wait a second and try again */
                if (rc < 0) {
                        cfs_waitq_t waitq;
                        struct l_wait_info lwi;

                        if (oti && oti->oti_thread &&
                            oti->oti_thread->t_watchdog)
                                lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                       CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                        CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
                               count_err++);

                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)),
                                          NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                }

                if (rc < 0 || cycle % 10 == 0) {
                        cfs_spin_lock(&last_print_lock);
                        if (last_print == 0 ||
                            cfs_time_before((last_print +
                                             cfs_time_seconds(30)),
                                            cfs_time_current())) {
                                last_print = cfs_time_current();
                                cfs_spin_unlock(&last_print_lock);
                                CWARN("still haven't managed to acquire quota "
                                      "space from the quota master after %d "
                                      "retries (err=%d, rc=%d)\n",
                                      cycle, count_err - 1, rc);
                        } else {
                                cfs_spin_unlock(&last_print_lock);
                        }
                }

                CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
                       cycle);
        }

        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats,
                            isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
                                    LQUOTA_WAIT_FOR_CHK_INO,
                            timediff);

        if (rc > 0)
                rc = 0;
        RETURN(rc);
}
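/*
 * The retry loop above sleeps for min(cycle, 10) seconds between
 * attempts, i.e. a linear backoff capped at 10s. A minimal sketch of
 * the same pattern in isolation (the function name is illustrative):
 */
static void example_backoff_sleep(int cycle)
{
        cfs_waitq_t waitq;
        struct l_wait_info lwi;

        cfs_waitq_init(&waitq);
        lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL, NULL);
        /* waiting on a condition that is always false simply sleeps
         * until the timeout fires */
        l_wait_event(waitq, 0, &lwi);
}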
static int mdt_ck_thread_main(void *args)
{
        struct mdt_device      *mdt = args;
        struct ptlrpc_thread   *thread = &mdt->mdt_ck_thread;
        struct lustre_capa_key *bkey = &mdt->mdt_capa_keys[0];
        struct lustre_capa_key *rkey = &mdt->mdt_capa_keys[1];
        struct lustre_capa_key *tmp;
        struct lu_env           env;
        struct mdt_thread_info *info;
        struct md_device       *next;
        struct l_wait_info      lwi = { 0 };
        mdsno_t                 mdsnum;
        int                     rc;
        ENTRY;

        unshare_fs_struct();
        cfs_block_allsigs();

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
        if (rc)
                RETURN(rc);

        thread->t_env = &env;
        env.le_ctx.lc_thread = thread;
        env.le_ctx.lc_cookie = 0x1;
        info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
        LASSERT(info != NULL);

        tmp = &info->mti_capa_key;
        mdsnum = mdt_seq_site(mdt)->ss_node_id;
        while (1) {
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_event(thread),
                             &lwi);

                if (thread_is_stopping(thread))
                        break;
                thread_clear_flags(thread, SVC_EVENT);

                if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
                        break;

                *tmp = *rkey;
                make_capa_key(tmp, mdsnum, rkey->lk_keyid);

                next = mdt->mdt_child;
                rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
                if (!rc) {
                        spin_lock(&capa_lock);
                        *bkey = *rkey;
                        *rkey = *tmp;
                        spin_unlock(&capa_lock);

                        rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
                        if (rc) {
                                spin_lock(&capa_lock);
                                *rkey = *bkey;
                                memset(bkey, 0, sizeof(*bkey));
                                spin_unlock(&capa_lock);
                        } else {
                                set_capa_key_expiry(mdt);
                                DEBUG_CAPA_KEY(D_SEC, rkey, "new");
                        }
                }
                if (rc) {
                        DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
                        /* next retry is in 300 sec */
                        mdt->mdt_ck_expiry = jiffies + 300 * HZ;
                }

                cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
                CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
        }
        lu_env_fini(&env);

        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);
        RETURN(0);
}
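/*
 * For context, a timer callback along these lines (a sketch, assuming
 * the standard ptlrpc thread-flag helpers; the function name is
 * illustrative) is what wakes the loop above: it marks the thread with
 * SVC_EVENT and signals the waitqueue, and mdt_ck_thread_main() then
 * decides whether the capability key has really expired.
 */
static void example_ck_timer_callback(unsigned long data)
{
        struct mdt_device *mdt = (struct mdt_device *)data;
        struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;

        thread_add_flags(thread, SVC_EVENT);
        wake_up(&thread->t_ctl_waitq);
}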