/*
 * Retrieve quota settings for a given identifier.
 *
 * \param env     - is the environment passed by the caller
 * \param qmt     - is the quota master target
 * \param pool_id - is the 16-bit pool identifier
 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
 *                  inode (i.e. LQUOTA_RES_MD)
 * \param qtype   - is the quota type
 * \param id      - is the quota identifier for which we want to access quota
 *                  settings.
 * \param hard    - is the output variable where to copy the hard limit
 * \param soft    - is the output variable where to copy the soft limit
 * \param time    - is the output variable where to copy the grace time
 */
static int qmt_get(const struct lu_env *env, struct qmt_device *qmt,
                   __u16 pool_id, __u8 restype, __u8 qtype,
                   union lquota_id *id, __u64 *hard, __u64 *soft,
                   __u64 *time)
{
        struct lquota_entry     *lqe;
        ENTRY;

        /* look up lqe structure containing quota settings */
        lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, restype, qtype, id);
        if (IS_ERR(lqe))
                RETURN(PTR_ERR(lqe));

        /* copy quota settings */
        lqe_read_lock(lqe);
        LQUOTA_DEBUG(lqe, "fetch settings");
        if (hard != NULL)
                *hard = lqe->lqe_hardlimit;
        if (soft != NULL)
                *soft = lqe->lqe_softlimit;
        if (time != NULL)
                *time = lqe->lqe_gracetime;
        lqe_read_unlock(lqe);

        lqe_putref(lqe);
        RETURN(0);
}
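/*
 * Illustrative caller sketch (not part of the build; the pool id, quota
 * type and uid below are assumptions made for the example):
 *
 *      union lquota_id qid = { .qid_uid = 500 };
 *      __u64           hard, soft, grace;
 *      int             rc;
 *
 *      rc = qmt_get(env, qmt, 0, LQUOTA_RES_DT, USRQUOTA, &qid,
 *                   &hard, &soft, &grace);
 *      if (rc == 0)
 *              CDEBUG(D_QUOTA, "uid:"LPU64" hard:"LPU64" soft:"LPU64"\n",
 *                     qid.qid_uid, hard, soft);
 */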
/**
 * Trigger pre-acquire/release if necessary.
 * It's only used by the ldiskfs osd so far. When a file is unlinked in
 * ldiskfs, the quota accounting isn't updated when the transaction is
 * stopped. Instead, it'll be updated on the final iput, so qsd_op_adjust()
 * will be called then (in osd_object_delete()) to trigger quota release
 * if necessary.
 *
 * \param env   - the environment passed by the caller
 * \param qsd   - is the qsd instance associated with the device in charge
 *                of the operation.
 * \param qid   - is the lquota ID of the user/group for which to trigger
 *                quota space adjustment
 * \param qtype - is the quota type (USRQUOTA or GRPQUOTA)
 */
void qsd_op_adjust(const struct lu_env *env, struct qsd_instance *qsd,
                   union lquota_id *qid, int qtype)
{
        struct lquota_entry     *lqe;
        struct qsd_qtype_info   *qqi;
        bool                     adjust;
        ENTRY;

        if (unlikely(qsd == NULL))
                RETURN_EXIT;

        /* We don't enforce quota until the qsd_instance is started */
        read_lock(&qsd->qsd_lock);
        if (!qsd->qsd_started) {
                read_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
        read_unlock(&qsd->qsd_lock);

        qqi = qsd->qsd_type_array[qtype];
        LASSERT(qqi);

        if (!qsd_type_enabled(qsd, qtype) || qqi->qqi_acct_obj == NULL ||
            qid->qid_uid == 0)
                RETURN_EXIT;

        read_lock(&qsd->qsd_lock);
        if (!qsd->qsd_started) {
                read_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
        read_unlock(&qsd->qsd_lock);

        lqe = lqe_locate(env, qqi->qqi_site, qid);
        if (IS_ERR(lqe)) {
                CERROR("%s: failed to locate lqe for id:"LPU64", type:%d\n",
                       qsd->qsd_svname, qid->qid_uid, qtype);
                RETURN_EXIT;
        }

        qsd_refresh_usage(env, lqe);

        lqe_read_lock(lqe);
        adjust = qsd_adjust_needed(lqe);
        lqe_read_unlock(lqe);

        if (adjust)
                qsd_adjust(env, lqe);

        lqe_putref(lqe);
        EXIT;
}
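/*
 * Hedged sketch of the ldiskfs OSD call site described above; the exact
 * surroundings of osd_object_delete() are assumptions here, only the
 * qsd_op_adjust() call itself is taken from this file:
 *
 *      static void osd_object_delete(const struct lu_env *env, ...)
 *      {
 *              ...
 *              // the final iput updated the accounting outside of the
 *              // transaction, poke the QSD now to release unused quota
 *              qsd_op_adjust(env, osd->od_quota_slave, &qid, USRQUOTA);
 *      }
 */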
/*
 * Return the lquota entry structure associated with a per-ID lock
 *
 * \param lock  - is the per-ID lock from which we should extract the lquota
 *                entry
 * \param reset - whether lock->l_ast_data should be cleared
 */
static struct lquota_entry *qsd_id_ast_data_get(struct ldlm_lock *lock,
                                                bool reset)
{
        struct lquota_entry *lqe;
        ENTRY;

        lock_res_and_lock(lock);
        lqe = lock->l_ast_data;
        if (lqe != NULL) {
                lqe_getref(lqe);
                if (reset)
                        lock->l_ast_data = NULL;
        }
        unlock_res_and_lock(lock);

        if (reset && lqe != NULL)
                /* release the lqe reference held for the lock */
                lqe_putref(lqe);

        RETURN(lqe);
}
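/*
 * Usage recap, grounded in the two callers below: the glimpse handler only
 * peeks at the entry, while the cancel path detaches it from the lock:
 *
 *      lqe = qsd_id_ast_data_get(lock, false); // glimpse: keep l_ast_data
 *      lqe = qsd_id_ast_data_get(lock, true);  // cancel: clear l_ast_data
 *
 * Either way the caller owns a reference and must drop it with lqe_putref().
 */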
/**
 * Post quota operation, pre-acquire/release quota from master.
 *
 * \param env - the environment passed by the caller
 * \param qqi - is the qsd_qtype_info structure associated with the quota ID
 *              subject to the operation
 * \param qid - stores information related to this ID for the operation
 *              which has just completed
 */
static void qsd_op_end0(const struct lu_env *env, struct qsd_qtype_info *qqi,
                        struct lquota_id_info *qid)
{
        struct lquota_entry     *lqe;
        bool                     adjust;
        ENTRY;

        lqe = qid->lqi_qentry;
        if (lqe == NULL)
                RETURN_EXIT;
        qid->lqi_qentry = NULL;

        /* refresh cached usage if a suitable environment is passed */
        if (env != NULL)
                qsd_refresh_usage(env, lqe);

        lqe_write_lock(lqe);
        if (qid->lqi_space > 0)
                lqe->lqe_pending_write -= qid->lqi_space;
        if (env != NULL)
                adjust = qsd_adjust_needed(lqe);
        else
                adjust = true;
        lqe_write_unlock(lqe);

        if (adjust) {
                /* pre-acquire/release quota space is needed */
                if (env != NULL)
                        qsd_adjust(env, lqe);
                else
                        /* no suitable environment, handle adjustment in
                         * separate thread context */
                        qsd_adjust_schedule(lqe, false, false);
        }
        lqe_putref(lqe);
        EXIT;
}
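/*
 * Pairing sketch (illustrative; the loop and field names are assumptions):
 * each ID handled in qsd_op_begin0() keeps its lqe pinned in
 * qid->lqi_qentry until transaction stop, at which point qsd_op_end() is
 * expected to walk the per-transaction ID array and call:
 *
 *      for (i = 0; i < trans->lqt_id_cnt; i++)
 *              qsd_op_end0(env, qsd->qsd_type_array[...], &trans->lqt_ids[i]);
 */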
/*
 * Glimpse callback handler for per-ID quota locks.
 *
 * \param lock - is the lock targeted by the glimpse
 * \param data - is a pointer to the glimpse ptlrpc request
 */
static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data)
{
        struct ptlrpc_request           *req = data;
        struct lquota_entry             *lqe;
        struct qsd_instance             *qsd;
        struct ldlm_gl_lquota_desc      *desc;
        struct lquota_lvb               *lvb;
        int                              rc;
        bool                             wakeup = false;
        ENTRY;

        rc = qsd_common_glimpse_ast(req, &desc, (void **)&lvb);
        if (rc)
                GOTO(out, rc);

        lqe = qsd_id_ast_data_get(lock, false);
        if (lqe == NULL)
                /* valid race */
                GOTO(out, rc = -ELDLM_NO_LOCK_DATA);

        LQUOTA_DEBUG(lqe, "glimpse on quota locks, new qunit:"LPU64,
                     desc->gl_qunit);

        qsd = lqe2qqi(lqe)->qqi_qsd;

        lqe_write_lock(lqe);
        lvb->lvb_id_rel = 0;
        if (desc->gl_qunit != 0 && desc->gl_qunit != lqe->lqe_qunit) {
                long long space;

                /* extract new qunit from glimpse request */
                qsd_set_qunit(lqe, desc->gl_qunit);

                space  = lqe->lqe_granted - lqe->lqe_pending_rel;
                space -= lqe->lqe_usage;
                space -= lqe->lqe_pending_write + lqe->lqe_waiting_write;
                space -= lqe->lqe_qunit;

                if (space > 0) {
                        if (lqe->lqe_pending_req > 0) {
                                LQUOTA_DEBUG(lqe, "request in flight, postpone "
                                             "release of "LPD64, space);
                                lvb->lvb_id_may_rel = space;
                        } else {
                                lqe->lqe_pending_req++;

                                /* release quota space in glimpse reply */
                                LQUOTA_DEBUG(lqe, "releasing "LPD64, space);
                                lqe->lqe_granted -= space;
                                lvb->lvb_id_rel   = space;

                                lqe_write_unlock(lqe);
                                /* change the lqe_granted */
                                qsd_upd_schedule(lqe2qqi(lqe), lqe,
                                                 &lqe->lqe_id,
                                                 (union lquota_rec *)
                                                 &lqe->lqe_granted, 0, false);
                                lqe_write_lock(lqe);

                                lqe->lqe_pending_req--;
                                wakeup = true;
                        }
                }
        }

        lqe->lqe_edquot = !!(desc->gl_flags & LQUOTA_FL_EDQUOT);
        lqe_write_unlock(lqe);

        if (wakeup)
                wake_up_all(&lqe->lqe_waiters);
        lqe_putref(lqe);
out:
        req->rq_status = rc;
        RETURN(rc);
}
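/*
 * Worked example of the release computation above, with made-up numbers:
 * granted = 100, pending_rel = 0, usage = 20, pending_write = 10,
 * waiting_write = 0 and a new qunit of 16 give
 *
 *      space = 100 - 0 - 20 - (10 + 0) - 16 = 54
 *
 * i.e. everything beyond current and in-flight usage plus one spare qunit
 * can be handed back to the master in the glimpse reply.
 */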
/**
 * Blocking callback handler for per-ID lock
 *
 * \param lock - is the lock for which the ast occurred.
 * \param desc - is the description of a conflicting lock in case of blocking
 *               ast.
 * \param data - is the value of lock->l_ast_data
 * \param flag - LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
 *               cancellation and blocking asts.
 */
static int qsd_id_blocking_ast(struct ldlm_lock *lock,
                               struct ldlm_lock_desc *desc,
                               void *data, int flag)
{
        struct lustre_handle    lockh;
        int                     rc = 0;
        ENTRY;

        switch (flag) {
        case LDLM_CB_BLOCKING: {
                LDLM_DEBUG(lock, "blocking AST on ID quota lock");
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
                break;
        }
        case LDLM_CB_CANCELING: {
                struct lu_env           *env;
                struct lquota_entry     *lqe;
                bool                     rel = false;

                LDLM_DEBUG(lock, "canceling ID quota lock");
                lqe = qsd_id_ast_data_get(lock, true);
                if (lqe == NULL)
                        break;

                LQUOTA_DEBUG(lqe, "losing ID lock");

                /* just local cancel (for stack clean up or eviction), don't
                 * release quota space in this case */
                if (ldlm_is_local_only(lock)) {
                        lqe_putref(lqe);
                        break;
                }

                /* allocate environment */
                OBD_ALLOC_PTR(env);
                if (env == NULL) {
                        lqe_putref(lqe);
                        rc = -ENOMEM;
                        break;
                }

                /* initialize environment */
                rc = lu_env_init(env, LCT_DT_THREAD);
                if (rc) {
                        OBD_FREE_PTR(env);
                        lqe_putref(lqe);
                        break;
                }

                ldlm_lock2handle(lock, &lockh);
                lqe_write_lock(lqe);
                if (lustre_handle_equal(&lockh, &lqe->lqe_lockh)) {
                        /* clear lqe_lockh & reset qunit to 0 */
                        qsd_set_qunit(lqe, 0);
                        memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
                        lqe->lqe_edquot = false;
                        rel = true;
                }
                lqe_write_unlock(lqe);

                /* If there is a dqacq in flight, the release will be skipped
                 * at this time and triggered on dqacq completion later, which
                 * means there could be a short window where the slave is
                 * holding spare grant without a per-ID lock. */
                if (rel)
                        rc = qsd_adjust(env, lqe);

                /* release lqe reference grabbed by qsd_id_ast_data_get() */
                lqe_putref(lqe);
                lu_env_fini(env);
                OBD_FREE_PTR(env);
                break;
        }
        default:
                LASSERTF(0, "invalid flags for blocking ast %d", flag);
        }

        RETURN(rc);
}
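/*
 * Note on the cancel branch above: ldlm ASTs run in a thread context with
 * no usable lu_env, so one is set up on the fly. The pattern, extracted
 * from the code above:
 *
 *      OBD_ALLOC_PTR(env);
 *      rc = lu_env_init(env, LCT_DT_THREAD);
 *      ... use env for qsd_adjust() ...
 *      lu_env_fini(env);
 *      OBD_FREE_PTR(env);
 */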
/*
 * Update quota settings for a given identifier.
 *
 * \param env     - is the environment passed by the caller
 * \param qmt     - is the quota master target
 * \param pool_id - is the 16-bit pool identifier
 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
 *                  inode (i.e. LQUOTA_RES_MD)
 * \param qtype   - is the quota type
 * \param id      - is the quota identifier for which we want to modify quota
 *                  settings.
 * \param hard    - is the new hard limit
 * \param soft    - is the new soft limit
 * \param time    - is the new grace time
 * \param valid   - is the list of settings to change
 */
static int qmt_set(const struct lu_env *env, struct qmt_device *qmt,
                   __u16 pool_id, __u8 restype, __u8 qtype,
                   union lquota_id *id, __u64 hard, __u64 soft, __u64 time,
                   __u32 valid)
{
        struct qmt_thread_info  *qti = qmt_info(env);
        struct lquota_entry     *lqe;
        struct thandle          *th = NULL;
        __u64                    ver, now;
        bool                     dirtied = false;
        int                      rc = 0;
        ENTRY;

        /* look up the quota entry associated with this ID */
        lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, restype, qtype, id);
        if (IS_ERR(lqe))
                RETURN(PTR_ERR(lqe));

        /* allocate & start transaction with enough credits to update quota
         * settings in the global index file */
        th = qmt_trans_start(env, lqe, &qti->qti_restore);
        if (IS_ERR(th))
                GOTO(out_nolock, rc = PTR_ERR(th));

        now = cfs_time_current_sec();

        lqe_write_lock(lqe);
        LQUOTA_DEBUG(lqe, "changing quota settings valid:%x hard:"LPU64
                     " soft:"LPU64" time:"LPU64, valid, hard, soft, time);

        if ((valid & QIF_TIMES) != 0 && lqe->lqe_gracetime != time) {
                /* change time settings */
                lqe->lqe_gracetime = time;
                dirtied            = true;
        }

        if ((valid & QIF_LIMITS) != 0 &&
            (lqe->lqe_hardlimit != hard || lqe->lqe_softlimit != soft)) {
                rc = qmt_validate_limits(lqe, hard, soft);
                if (rc)
                        GOTO(out, rc);

                /* recompute qunit in case it was never initialized */
                qmt_revalidate(env, lqe);

                /* change quota limits */
                lqe->lqe_hardlimit = hard;
                lqe->lqe_softlimit = soft;

                if (lqe->lqe_softlimit == 0 ||
                    lqe->lqe_granted <= lqe->lqe_softlimit)
                        /* no soft limit or below soft limit, let's clear grace
                         * time */
                        lqe->lqe_gracetime = 0;
                else if ((valid & QIF_TIMES) == 0)
                        /* set grace only if user hasn't provided his own */
                        lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);

                /* change enforced status based on new parameters */
                if (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0)
                        lqe->lqe_enforced = false;
                else
                        lqe->lqe_enforced = true;

                dirtied = true;
        }

        if (dirtied) {
                /* write new quota settings to disk */
                rc = qmt_glb_write(env, th, lqe, LQUOTA_BUMP_VER, &ver);
                if (rc) {
                        /* restore initial quota settings */
                        qmt_restore(lqe, &qti->qti_restore);
                        GOTO(out, rc);
                }

                /* compute new qunit value now that we have modified the quota
                 * settings */
                qmt_adjust_qunit(env, lqe);

                /* clear/set edquot flag as needed */
                qmt_adjust_edquot(lqe, now);
        }
        EXIT;
out:
        lqe_write_unlock(lqe);
out_nolock:
        lqe_putref(lqe);
        if (th != NULL && !IS_ERR(th))
                dt_trans_stop(env, qmt->qmt_child, th);

        if (rc == 0 && dirtied)
                qmt_glb_lock_notify(env, lqe, ver);

        return rc;
}
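/*
 * Illustrative caller sketch (not part of the build; id and limit values
 * are assumptions, QIF_LIMITS comes from the quota UAPI):
 *
 *      union lquota_id qid = { .qid_uid = 500 };
 *
 *      // update block hard/soft limits only, leave grace time untouched
 *      rc = qmt_set(env, qmt, 0, LQUOTA_RES_DT, USRQUOTA, &qid,
 *                   hard, soft, 0, QIF_LIMITS);
 */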
/*
 * Handle quota request from slave.
 *
 * \param env - is the environment passed by the caller
 * \param ld  - is the lu device associated with the qmt
 * \param req - is the quota acquire request
 */
static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
                     struct ptlrpc_request *req)
{
        struct qmt_device       *qmt = lu2qmt_dev(ld);
        struct quota_body       *qbody, *repbody;
        struct obd_uuid         *uuid;
        struct ldlm_lock        *lock;
        struct lquota_entry     *lqe;
        int                      pool_id, pool_type, qtype;
        int                      rc;
        ENTRY;

        qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (qbody == NULL)
                RETURN(err_serious(-EPROTO));

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /* verify if global lock is stale */
        if (!lustre_handle_is_used(&qbody->qb_glb_lockh))
                RETURN(-ENOLCK);

        lock = ldlm_handle2lock(&qbody->qb_glb_lockh);
        if (lock == NULL)
                RETURN(-ENOLCK);
        LDLM_LOCK_PUT(lock);

        uuid = &req->rq_export->exp_client_uuid;

        if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
            req_is_preacq(qbody->qb_flags) > 1) {
                CERROR("%s: malformed quota request with conflicting flags set "
                       "(%x) from slave %s\n", qmt->qmt_svname,
                       qbody->qb_flags, obd_uuid2str(uuid));
                RETURN(-EPROTO);
        }

        if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
                /* acquire and pre-acquire should use a valid ID lock */

                if (!lustre_handle_is_used(&qbody->qb_lockh))
                        RETURN(-ENOLCK);

                lock = ldlm_handle2lock(&qbody->qb_lockh);
                if (lock == NULL)
                        /* no lock associated with this handle */
                        RETURN(-ENOLCK);

                LDLM_DEBUG(lock, "%sacquire request",
                           req_is_preacq(qbody->qb_flags) ? "pre" : "");

                if (!obd_uuid_equals(&lock->l_export->exp_client_uuid, uuid)) {
                        /* sorry, no way to cheat ... */
                        LDLM_LOCK_PUT(lock);
                        RETURN(-ENOLCK);
                }

                if ((lock->l_flags & LDLM_FL_AST_SENT) != 0) {
                        struct ptlrpc_service_part      *svc;
                        unsigned int                     timeout;

                        svc = req->rq_rqbd->rqbd_svcpt;
                        timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
                        timeout = max(timeout, ldlm_timeout);

                        /* lock is being cancelled, prolong timeout */
                        ldlm_refresh_waiting_lock(lock, timeout);
                }
                LDLM_LOCK_PUT(lock);
        }

        /* extract pool & quota information from the global index FID packed
         * in the request */
        rc = lquota_extract_fid(&qbody->qb_fid, &pool_id, &pool_type, &qtype);
        if (rc)
                RETURN(-EINVAL);

        /* find the quota entry associated with the quota id */
        lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, pool_type, qtype,
                                  &qbody->qb_id);
        if (IS_ERR(lqe))
                RETURN(PTR_ERR(lqe));

        /* process quota request */
        rc = qmt_dqacq0(env, lqe, qmt, uuid, qbody->qb_flags, qbody->qb_count,
                        qbody->qb_usage, repbody);

        if (lustre_handle_is_used(&qbody->qb_lockh))
                /* return the current qunit value only to slaves owning a
                 * per-ID quota lock. For enqueue, the qunit value will be
                 * returned in the LVB */
                repbody->qb_qunit = lqe->lqe_qunit;
        lqe_putref(lqe);
        RETURN(rc);
}
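/*
 * The conflicting-flags check above relies on req_is_rel()/req_is_acq()/
 * req_is_preacq() each returning 0 or 1, so their sum counts how many of
 * the three mutually exclusive operations were requested; e.g. a body
 * carrying both the release and acquire flags (hypothetical flag names):
 *
 *      qb_flags = QUOTA_DQACQ_FL_REL | QUOTA_DQACQ_FL_ACQ;     // sum == 2
 *
 * is rejected with -EPROTO.
 */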
static void lqe_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lquota_entry *lqe;

        lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_putref(lqe);
}
/**
 * Quota enforcement handler. If local quota can satisfy this operation,
 * return success, otherwise, acquire more quota from master.
 * (for a write operation, if the master isn't available at this moment,
 * return -EINPROGRESS to inform the client to retry the write)
 *
 * \param env   - the environment passed by the caller
 * \param qqi   - is the qsd_qtype_info structure associated with the quota ID
 *                subject to the operation
 * \param qid   - is the qid information attached in the transaction handle
 * \param space - is the space required by the operation
 * \param flags - if the operation is a write, the per-type over-quota and
 *                sync commit flags are returned here to the caller
 *
 * \retval 0            - success
 * \retval -EDQUOT      - out of quota
 * \retval -EINPROGRESS - inform client to retry write
 * \retval -ve          - other appropriate errors
 */
static int qsd_op_begin0(const struct lu_env *env, struct qsd_qtype_info *qqi,
                         struct lquota_id_info *qid, long long space,
                         int *flags)
{
        struct lquota_entry     *lqe;
        int                      rc, ret = -EINPROGRESS;
        struct l_wait_info       lwi;
        ENTRY;

        if (qid->lqi_qentry != NULL) {
                /* we already had to deal with this id for this transaction */
                lqe = qid->lqi_qentry;
                if (!lqe->lqe_enforced)
                        RETURN(0);
        } else {
                /* look up lquota entry associated with qid */
                lqe = lqe_locate(env, qqi->qqi_site, &qid->lqi_id);
                if (IS_ERR(lqe))
                        RETURN(PTR_ERR(lqe));
                if (!lqe->lqe_enforced) {
                        lqe_putref(lqe);
                        RETURN(0);
                }
                qid->lqi_qentry = lqe;
                /* lqe will be released in qsd_op_end() */
        }

        if (space <= 0) {
                /* when space is negative or null, we don't need to consume
                 * quota space. That said, we still want to perform space
                 * adjustments in qsd_op_end, so we return here, but with
                 * a reference on the lqe */
                if (flags != NULL) {
                        rc = qsd_refresh_usage(env, lqe);
                        GOTO(out_flags, rc);
                }
                RETURN(0);
        }

        LQUOTA_DEBUG(lqe, "op_begin space:"LPD64, space);

        lqe_write_lock(lqe);
        lqe->lqe_waiting_write += space;
        lqe_write_unlock(lqe);

        /* acquire quota space for the operation, cap overall wait time to
         * prevent a service thread from being stuck for too long */
        lwi = LWI_TIMEOUT(cfs_time_seconds(qsd_wait_timeout(qqi->qqi_qsd)),
                          NULL, NULL);
        rc = l_wait_event(lqe->lqe_waiters, qsd_acquire(env, lqe, space, &ret),
                          &lwi);

        if (rc == 0 && ret == 0) {
                qid->lqi_space += space;
        } else {
                if (rc == 0)
                        rc = ret;

                LQUOTA_DEBUG(lqe, "acquire quota failed:%d", rc);

                lqe_write_lock(lqe);
                lqe->lqe_waiting_write -= space;

                if (flags && lqe->lqe_pending_write != 0)
                        /* Inform OSD layer that there are pending writes.
                         * It might want to retry after a sync if appropriate */
                        *flags |= QUOTA_FL_SYNC;
                lqe_write_unlock(lqe);

                /* convert recoverable error into -EINPROGRESS, client will
                 * retry */
                if (rc == -ETIMEDOUT || rc == -ENOTCONN || rc == -ENOLCK ||
                    rc == -EAGAIN || rc == -EINTR) {
                        rc = -EINPROGRESS;
                } else if (rc == -ESRCH) {
                        rc = 0;
                        LQUOTA_ERROR(lqe, "ID isn't enforced on master, this "
                                     "is probably due to a legal race; if "
                                     "this message shows up constantly, there "
                                     "could be an inconsistency between "
                                     "master & slave, and quota reintegration "
                                     "needs to be re-triggered.");
                }
        }

        if (flags != NULL) {
out_flags:
                LASSERT(qid->lqi_is_blk);
                if (rc != 0) {
                        *flags |= LQUOTA_OVER_FL(qqi->qqi_qtype);
                } else {
                        __u64 usage;

                        lqe_read_lock(lqe);
                        usage  = lqe->lqe_usage;
                        usage += lqe->lqe_pending_write;
                        usage += lqe->lqe_waiting_write;
                        usage += qqi->qqi_qsd->qsd_sync_threshold;

                        /* if we should notify client to start sync write */
                        if (usage >= lqe->lqe_granted - lqe->lqe_pending_rel)
                                *flags |= LQUOTA_OVER_FL(qqi->qqi_qtype);
                        else
                                *flags &= ~LQUOTA_OVER_FL(qqi->qqi_qtype);
                        lqe_read_unlock(lqe);
                }
        }
        RETURN(rc);
}
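/*
 * Hedged sketch of how an OSD write path might drive this handler through
 * the public qsd_op_begin() wrapper (the surrounding declare logic is an
 * assumption; only the flag handling mirrors the code above):
 *
 *      rc = qsd_op_begin(env, qsd, trans, &qi, &flags);
 *      if (rc == -EINPROGRESS)
 *              // master not reachable yet, client will retry the write
 *      else if (rc == 0 && (flags & QUOTA_FL_SYNC))
 *              // commit synchronously so pending writes release quota early
 */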
/**
 * Callback function called when an acquire/release request sent to the master
 * is completed
 */
static void qsd_req_completion(const struct lu_env *env,
                               struct qsd_qtype_info *qqi,
                               struct quota_body *reqbody,
                               struct quota_body *repbody,
                               struct lustre_handle *lockh,
                               struct lquota_lvb *lvb,
                               void *arg, int ret)
{
        struct lquota_entry     *lqe = (struct lquota_entry *)arg;
        struct qsd_thread_info  *qti;
        int                      rc;
        bool                     adjust = false, cancel = false;
        ENTRY;

        LASSERT(qqi != NULL && lqe != NULL);

        /* environment passed by ptlrpcd is mostly used by CLIO and doesn't
         * have the DT tags set. */
        rc = lu_env_refill_by_tags((struct lu_env *)env, LCT_DT_THREAD, 0);
        if (rc) {
                LQUOTA_ERROR(lqe, "failed to refill environment %d", rc);
                lqe_write_lock(lqe);
                /* can't afford to adjust quota space with no suitable lu_env */
                GOTO(out_noadjust, rc);
        }
        qti = qsd_info(env);

        lqe_write_lock(lqe);
        LQUOTA_DEBUG(lqe, "DQACQ returned %d, flags:0x%x", ret,
                     reqbody->qb_flags);

        /* despite -EDQUOT & -EINPROGRESS errors, the master might still
         * grant us back quota space to adjust quota overrun */
        if (ret != 0 && ret != -EDQUOT && ret != -EINPROGRESS) {
                if (ret != -ETIMEDOUT && ret != -ENOTCONN &&
                    ret != -ESHUTDOWN && ret != -EAGAIN)
                        /* print errors only if return code is unexpected */
                        LQUOTA_ERROR(lqe, "DQACQ failed with %d, flags:0x%x",
                                     ret, reqbody->qb_flags);
                GOTO(out, ret);
        }

        /* Set the lqe_lockh */
        if (lustre_handle_is_used(lockh) &&
            !lustre_handle_equal(lockh, &lqe->lqe_lockh))
                lustre_handle_copy(&lqe->lqe_lockh, lockh);

        /* If the replied qb_count is zero, it means the master didn't process
         * the DQACQ since the limit for this ID has been removed, so we
         * should not update the quota entry & slave index copy either. */
        if (repbody != NULL && repbody->qb_count != 0) {
                LQUOTA_DEBUG(lqe, "DQACQ qb_count:"LPU64, repbody->qb_count);

                if (req_is_rel(reqbody->qb_flags)) {
                        if (lqe->lqe_granted < repbody->qb_count) {
                                LQUOTA_ERROR(lqe, "can't release more space "
                                             "than owned "LPU64"<"LPU64,
                                             lqe->lqe_granted,
                                             repbody->qb_count);
                                lqe->lqe_granted = 0;
                        } else {
                                lqe->lqe_granted -= repbody->qb_count;
                        }
                        /* Cancel the per-ID lock proactively when there
                         * isn't any usage & grant, which can avoid the master
                         * sending glimpses unnecessarily to this slave on
                         * quota revoking */
                        if (!lqe->lqe_pending_write && !lqe->lqe_granted &&
                            !lqe->lqe_waiting_write && !lqe->lqe_usage)
                                cancel = true;
                } else {
                        lqe->lqe_granted += repbody->qb_count;
                }
                qti->qti_rec.lqr_slv_rec.qsr_granted = lqe->lqe_granted;
                lqe_write_unlock(lqe);

                /* Update the slave index file in the dedicated thread. So far,
                 * we don't update the version of slave index copy on DQACQ.
                 * No locking is necessary since nobody can change
                 * lqe->lqe_granted while lqe->lqe_pending_req > 0 */
                qsd_upd_schedule(qqi, lqe, &lqe->lqe_id, &qti->qti_rec, 0,
                                 false);
                lqe_write_lock(lqe);
        }

        /* extract information from lvb */
        if (ret == 0 && lvb != NULL) {
                if (lvb->lvb_id_qunit != 0)
                        qsd_set_qunit(lqe, lvb->lvb_id_qunit);
                qsd_set_edquot(lqe, !!(lvb->lvb_flags & LQUOTA_FL_EDQUOT));
        } else if (repbody != NULL && repbody->qb_qunit != 0) {
                qsd_set_qunit(lqe, repbody->qb_qunit);
        }

        /* turn off pre-acquire if it failed with -EDQUOT. This is done to
         * avoid flooding the master with acquire requests. Pre-acquire will
         * be turned on again as soon as qunit is modified */
        if (req_is_preacq(reqbody->qb_flags) && ret == -EDQUOT)
                lqe->lqe_nopreacq = true;
out:
        adjust = qsd_adjust_needed(lqe);
        if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
                lqe->lqe_acq_rc = ret;
                lqe->lqe_acq_time = cfs_time_current_64();
        }
out_noadjust:
        qsd_request_exit(lqe);
        lqe_write_unlock(lqe);

        /* release reference on per-ID lock */
        if (lustre_handle_is_used(lockh))
                ldlm_lock_decref(lockh, qsd_id_einfo.ei_mode);

        if (cancel) {
                qsd_adjust_schedule(lqe, false, true);
        } else if (adjust) {
                if (!ret || ret == -EDQUOT)
                        qsd_adjust_schedule(lqe, false, false);
                else
                        qsd_adjust_schedule(lqe, true, false);
        }
        lqe_putref(lqe);

        if (lvb)
                OBD_FREE_PTR(lvb);
        EXIT;
}
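/*
 * Recap of the scheduling decision at the end of qsd_req_completion(),
 * taken directly from the code above:
 *
 *      cancel                      -> qsd_adjust_schedule(lqe, false, true)
 *      adjust, ret == 0 or -EDQUOT -> qsd_adjust_schedule(lqe, false, false)
 *      adjust, any other ret       -> qsd_adjust_schedule(lqe, true,  false)
 */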