Example #1
/**
 * Check whether a qsd instance is ready to send a quota request to the master.
 * This includes checking whether:
 * - the connection to master is set up and usable,
 * - the qsd isn't stopping,
 * - reintegration has been successfully completed and all indexes are
 *   up-to-date.
 *
 * \param lqe - is the lquota entry for which we would like to send a quota
 *              request
 * \param lockh - is the remote handle of the global lock returned on success
 *
 * \retval 0 on success, appropriate error on failure
 */
static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
{
	struct qsd_qtype_info	*qqi = lqe2qqi(lqe);
	struct qsd_instance	*qsd = qqi->qqi_qsd;
	struct obd_import	*imp = NULL;
	struct ldlm_lock	*lock;
	ENTRY;

	read_lock(&qsd->qsd_lock);
	/* is the qsd about to shut down? */
	if (qsd->qsd_stopping) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "dropping quota req since qsd is stopping");
		/* Target is about to shut down, client will retry */
		RETURN(-EINPROGRESS);
	}

	/* is the connection to the quota master ready? */
	if (qsd->qsd_exp_valid)
		imp = class_exp2cliimp(qsd->qsd_exp);
	if (imp == NULL || imp->imp_invalid) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "connection to master not ready");
		RETURN(-ENOTCONN);
	}

	/* In most cases, reintegration must have been triggered (when quota
	 * is enabled or on OST start); however, in a rare race condition
	 * (enabling quota while starting OSTs), we might miss triggering
	 * reintegration for some qqi.
	 *
	 * If the previous reintegration failed for some reason, we'll
	 * re-trigger it here as well. */
	if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "not up-to-date, dropping request and "
			     "kicking off reintegration");
		qsd_start_reint_thread(qqi);
		RETURN(-EINPROGRESS);
	}

	/* Fill the remote global lock handle; the master will check this
	 * handle to see if the slave is sending a request with a stale lock */
	lustre_handle_copy(lockh, &qqi->qqi_lockh);
	read_unlock(&qsd->qsd_lock);

	if (!lustre_handle_is_used(lockh))
		RETURN(-ENOLCK);

	lock = ldlm_handle2lock(lockh);
	if (lock == NULL)
		RETURN(-ENOLCK);

	/* return remote lock handle to be packed in quota request */
	lustre_handle_copy(lockh, &lock->l_remote_handle);
	LDLM_LOCK_PUT(lock);

	RETURN(0);
}
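
A minimal caller sketch (hypothetical function name, not from the Lustre
tree): request paths zero a quota body and gate on qsd_ready() before
filling in the rest, exactly as qsd_adjust() and qsd_acquire_remote() do
in the later examples.

static int example_send_quota_req(const struct lu_env *env,
				  struct lquota_entry *lqe)
{
	struct quota_body qbody;
	int rc;

	memset(&qbody, 0, sizeof(qbody));
	/* returns -EINPROGRESS, -ENOTCONN or -ENOLCK when the slave isn't
	 * ready; the caller simply retries later */
	rc = qsd_ready(lqe, &qbody.qb_glb_lockh);
	if (rc)
		return rc;

	/* ... fill qb_fid/qb_id and send the request ... */
	return 0;
}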
Example #2
File: qsd_lock.c  Project: Lezval/lustre
int qsd_id_lock_cancel(const struct lu_env *env, struct lquota_entry *lqe)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	int			 rc;
	ENTRY;

	lqe_write_lock(lqe);
	if (lqe->lqe_pending_write || lqe->lqe_waiting_write ||
	    lqe->lqe_usage || lqe->lqe_granted) {
		lqe_write_unlock(lqe);
		RETURN(0);
	}

	lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
	if (lustre_handle_is_used(&qti->qti_lockh)) {
		memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
		qsd_set_qunit(lqe, 0);
		lqe->lqe_edquot = false;
	}
	lqe_write_unlock(lqe);

	rc = qsd_id_lock_match(&qti->qti_lockh, NULL);
	if (rc)
		RETURN(rc);

	ldlm_lock_decref_and_cancel(&qti->qti_lockh, qsd_id_einfo.ei_mode);
	RETURN(0);
}
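
The function above uses an idiom that recurs throughout these examples:
snapshot the lock handle while holding the lqe write lock, then perform
LDLM calls only after dropping it, since they may block. Condensed:

	struct lustre_handle lockh;

	lqe_write_lock(lqe);
	lustre_handle_copy(&lockh, &lqe->lqe_lockh);	/* snapshot */
	lqe_write_unlock(lqe);

	/* LDLM operations run outside the spinlock */
	if (qsd_id_lock_match(&lockh, NULL) == 0)
		ldlm_lock_decref_and_cancel(&lockh, qsd_id_einfo.ei_mode);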
Example #3
File: qsd_lock.c  Project: Lezval/lustre
/**
 * Check whether a slave already owns an ldlm lock for the quota identifier \qid.
 *
 * \param lockh  - is the local lock handle from lquota entry.
 * \param rlockh - is the remote lock handle of the matched lock, if any.
 *
 * \retval 0      : on successful lookup; \lockh contains the lock handle.
 * \retval -ENOENT: no lock found
 */
int qsd_id_lock_match(struct lustre_handle *lockh, struct lustre_handle *rlockh)
{
	struct ldlm_lock	*lock;
	int			 rc;
	ENTRY;

	LASSERT(lockh);

	if (!lustre_handle_is_used(lockh))
		RETURN(-ENOENT);

	rc = ldlm_lock_addref_try(lockh, qsd_id_einfo.ei_mode);
	if (rc)
		RETURN(-ENOENT);

	LASSERT(lustre_handle_is_used(lockh));
	ldlm_lock_dump_handle(D_QUOTA, lockh);

	if (rlockh == NULL)
		/* caller not interested in remote handle */
		RETURN(0);

	/* look up lock associated with local handle and extract remote handle
	 * to be packed in quota request */
	lock = ldlm_handle2lock(lockh);
	LASSERT(lock != NULL);
	lustre_handle_copy(rlockh, &lock->l_remote_handle);
	LDLM_LOCK_PUT(lock);

	RETURN(0);
}
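
Usage sketch: pass NULL for rlockh when only the existence of a valid
lock matters (as qsd_id_lock_cancel() does above), or point it at the
quota body so the remote handle is packed directly into the request (as
qsd_adjust() and qsd_acquire_remote() do below):

	rc = qsd_id_lock_match(&qti->qti_lockh, &qbody->qb_lockh);

Note that a successful match takes a mode reference on the lock via
ldlm_lock_addref_try(); the completion path releases it with
ldlm_lock_decref() (see qsd_req_completion() in the last example).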
Example #4
File: osc_lock.c  Project: AK101111/linux
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
			     struct lustre_handle *lockh, bool lvb_update)
{
	struct ldlm_lock *dlmlock;

	dlmlock = ldlm_handle2lock_long(lockh, 0);
	LASSERT(dlmlock);

	/* lock reference taken by ldlm_handle2lock_long() is
	 * owned by osc_lock and released in osc_lock_detach()
	 */
	lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
	oscl->ols_has_ref = 1;

	LASSERT(!oscl->ols_dlmlock);
	oscl->ols_dlmlock = dlmlock;

	/* This may be a matched lock for glimpse request, do not hold
	 * lock reference in that case.
	 */
	if (!oscl->ols_glimpse) {
		/* hold a refc for non glimpse lock which will
		 * be released in osc_lock_cancel()
		 */
		lustre_handle_copy(&oscl->ols_handle, lockh);
		ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
		oscl->ols_hold = 1;
	}

	/* Lock must have been granted. */
	lock_res_and_lock(dlmlock);
	if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
		struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
		struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;

		/* extend the lock extent, otherwise it will have problem when
		 * we decide whether to grant a lockless lock.
		 */
		descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
		descr->cld_start = cl_index(descr->cld_obj, ext->start);
		descr->cld_end = cl_index(descr->cld_obj, ext->end);
		descr->cld_gid = ext->gid;

		/* no lvb update for matched lock */
		if (lvb_update) {
			LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
			osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
					    dlmlock, NULL);
		}
		LINVRNT(osc_lock_invariant(oscl));
	}
	unlock_res_and_lock(dlmlock);

	LASSERT(oscl->ols_state != OLS_GRANTED);
	oscl->ols_state = OLS_GRANTED;
}
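
Hedged invocation sketch (the enqueue upcall is not among these examples;
errcode and the surrounding names are assumptions): osc_lock_granted()
would be called once LDLM reports the enqueue result, with lvb_update set
for a freshly granted lock rather than a matched one.

	/* in a hypothetical enqueue upcall */
	if (errcode == ELDLM_OK)
		osc_lock_granted(env, oscl, lockh, /* lvb_update */ true);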
Example #5
/*
 * Send non-intent quota request to master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the acquire RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lockh  - is the lock handle passed along to the completion
 *                 callback
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_send_dqacq(const struct lu_env *env, struct obd_export *exp,
		   struct quota_body *qbody, bool sync,
		   qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		   struct lustre_handle *lockh, struct lquota_entry *lqe)
{
	struct ptlrpc_request	*req;
	struct quota_body	*req_qbody;
	struct qsd_async_args	*aa;
	int			 rc;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_QUOTA_DQACQ);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_resend = req->rq_no_delay = 1;
	req->rq_no_retry_einprogress = 1;
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, QUOTA_DQACQ);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	ptlrpc_request_set_replen(req);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = (void *)lqe;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, lockh);

	if (sync) {
		rc = ptlrpc_queue_wait(req);
		rc = qsd_dqacq_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		req->rq_interpret_reply = qsd_dqacq_interpret;
		ptlrpcd_add_req(req);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, lockh, NULL, lqe, rc);
	return rc;
}
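
Sketch of a matching completion callback (hypothetical body; the
signature mirrors how the callback is invoked above, both from the out:
error path and from qsd_dqacq_interpret() on RPC completion;
qsd_req_completion() in the last example is a real implementation):

static void example_completion(const struct lu_env *env,
			       struct qsd_qtype_info *qqi,
			       struct quota_body *reqbody,
			       struct quota_body *repbody,
			       struct lustre_handle *lockh,
			       struct lquota_lvb *lvb, void *arg, int rc)
{
	struct lquota_entry *lqe = arg;

	/* repbody and lvb are NULL on the error path; update lqe state,
	 * then release the references taken by the sender */
}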
Example #6
/*
 * Get intent per-ID lock or global-index lock from master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the intent RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous (pre-acquire)
 * \param it_op  - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lvb    - is the lvb associated with the lock and returned by the
 *                 server
 * \param arg    - is an opaque argument passed to the completion callback
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
		    struct quota_body *qbody, bool sync, int it_op,
		    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		    struct lquota_lvb *lvb, void *arg)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct ptlrpc_request	*req;
	struct qsd_async_args	*aa = NULL;
	struct ldlm_intent	*lit;
	struct quota_body	*req_qbody;
	__u64			 flags = LDLM_FL_HAS_INTENT;
	int			 rc;
	ENTRY;

	LASSERT(exp != NULL);
	LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

	memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LDLM_INTENT_QUOTA);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_retry_einprogress = 1;
	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
	lit->opc = (__u64)it_op;

	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(*lvb));
	ptlrpc_request_set_replen(req);

	switch(it_op) {
	case IT_QUOTA_CONN:
		/* build resource name associated with global index */
		fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with qqi pointer */
		memcpy(&qti->qti_einfo, &qsd_glb_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = qqi;

		/* don't cancel global lock on memory pressure */
		flags |= LDLM_FL_NO_LRU;
		break;
	case IT_QUOTA_DQACQ:
		/* build resource name associated with the per-ID quota lock */
		fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
					 &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with lqe pointer */
		memcpy(&qti->qti_einfo, &qsd_id_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = arg;
		break;
	default:
		LASSERTF(0, "invalid it_op %d", it_op);
	}

	/* build lock enqueue request */
	rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid, NULL,
			      &flags, (void *)lvb, sizeof(*lvb), LVB_T_LQUOTA,
			      &qti->qti_lockh, 1);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		GOTO(out, rc);
	}

	/* grab reference on backend structure for the new lock */
	switch(it_op) {
	case IT_QUOTA_CONN:
		/* grab reference on qqi for new lock */
#ifdef USE_LU_REF
	{
		struct ldlm_lock	*lock;

		lock = ldlm_handle2lock(&qti->qti_lockh);
		if (lock == NULL) {
			ptlrpc_req_finished(req);
			GOTO(out, rc = -ENOLCK);
		}
		lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
		LDLM_LOCK_PUT(lock);
	}
#endif
		qqi_getref(qqi);
		break;
	case IT_QUOTA_DQACQ:
		/* grab reference on lqe for new lock */
		lqe_getref((struct lquota_entry *)arg);
		/* all acquire/release request are sent with no_resend and
		 * no_delay flag */
		req->rq_no_resend = req->rq_no_delay = 1;
		break;
	default:
		break;
	}

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = arg;
	aa->aa_lvb = lvb;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

	if (sync) {
		/* send lock enqueue request and wait for completion */
		rc = ptlrpc_queue_wait(req);
		rc = qsd_intent_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		/* queue lock request and return */
		req->rq_interpret_reply = qsd_intent_interpret;
		ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
	return rc;
}
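
Hedged usage sketch for the IT_QUOTA_CONN branch (its caller is not among
these examples; some_completion is a placeholder): enqueue the
global-index lock synchronously, with the global index FID in the quota
body.

	struct lquota_lvb *lvb;
	int rc;

	OBD_ALLOC_PTR(lvb);
	if (lvb == NULL)
		return -ENOMEM;

	memset(qbody, 0, sizeof(*qbody));
	qbody->qb_fid = qqi->qqi_fid;	/* FID of the global index */

	/* arg is only dereferenced on the IT_QUOTA_DQACQ path, so NULL is
	 * passed here and forwarded untouched to the completion callback */
	rc = qsd_intent_lock(env, qqi->qqi_qsd->qsd_exp, qbody, true,
			     IT_QUOTA_CONN, some_completion, qqi, lvb, NULL);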
Example #7
/**
 * Adjust quota space (by acquiring or releasing) held by the quota slave.
 * This function is called after each quota request completion and during
 * reintegration in order to report usage or re-acquire quota locks.
 * Space adjustment is aborted if there is already a quota request in flight
 * for this ID.
 *
 * \param env    - the environment passed by the caller
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0 on success, appropriate errors on failure
 */
int qsd_adjust(const struct lu_env *env, struct lquota_entry *lqe)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct quota_body	*qbody = &qti->qti_body;
	struct qsd_instance	*qsd;
	struct qsd_qtype_info	*qqi;
	int			 rc;
	bool			 intent = false;
	ENTRY;

	memset(qbody, 0, sizeof(*qbody));
	rc = qsd_ready(lqe, &qbody->qb_glb_lockh);
	if (rc) {
		/* add to adjust list again to trigger adjustment later when
		 * slave is ready */
		LQUOTA_DEBUG(lqe, "delaying adjustment since qsd isn't ready");
		qsd_adjust_schedule(lqe, true, false);
		RETURN(0);
	}

	qqi = lqe2qqi(lqe);
	qsd = qqi->qqi_qsd;

	lqe_write_lock(lqe);

	/* fill qb_count & qb_flags */
	if (!qsd_calc_adjust(lqe, qbody)) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "no adjustment required");
		RETURN(0);
	}

	/* only 1 quota request in flight for a given ID is allowed */
	rc = qsd_request_enter(lqe);
	if (rc) {
		/* already a request in flight, space adjustment will be run
		 * again on request completion */
		lqe_write_unlock(lqe);
		RETURN(0);
	}

	if (req_is_rel(qbody->qb_flags))
		lqe->lqe_pending_rel = qbody->qb_count;
	lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
	lqe_write_unlock(lqe);

	/* hold a refcount until completion */
	lqe_getref(lqe);

	/* fill other quota body fields */
	qbody->qb_fid = qqi->qqi_fid;
	qbody->qb_id  = lqe->lqe_id;

	if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
		/* check whether we own a valid lock for this ID */
		rc = qsd_id_lock_match(&qti->qti_lockh, &qbody->qb_lockh);
		if (rc) {
			memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));
			if (req_is_preacq(qbody->qb_flags)) {
				if (req_has_rep(qbody->qb_flags))
					/* still want to report usage */
					qbody->qb_flags = QUOTA_DQACQ_FL_REPORT;
				else
					/* no pre-acquire if no per-ID lock */
					GOTO(out, rc = -ENOLCK);
			} else {
				/* no lock found, should use intent */
				intent = true;
			}
		} else if (req_is_acq(qbody->qb_flags) &&
			   qbody->qb_count == 0) {
			/* found cached lock, no need to acquire */
			GOTO(out, rc = 0);
		}
	} else {
		/* release and report don't need a per-ID lock */
		memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));
	}

	if (!intent) {
		rc = qsd_send_dqacq(env, qsd->qsd_exp, qbody, false,
				    qsd_req_completion, qqi, &qti->qti_lockh,
				    lqe);
	} else {
		struct lquota_lvb *lvb;

		OBD_ALLOC_PTR(lvb);
		if (lvb == NULL)
			GOTO(out, rc = -ENOMEM);

		rc = qsd_intent_lock(env, qsd->qsd_exp, qbody, false,
				     IT_QUOTA_DQACQ, qsd_req_completion,
				     qqi, lvb, (void *)lqe);
	}
	/* the completion function will be called by qsd_send_dqacq or
	 * qsd_intent_lock */
	RETURN(rc);
out:
	qsd_req_completion(env, qqi, qbody, NULL, &qti->qti_lockh, NULL, lqe,
			   rc);
	return rc;
}
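
qsd_adjust_schedule() itself is not among these examples; inferred from
its call sites here (not authoritative), the prototype and flag meanings
would be:

	/* second argument re-queues the entry so the adjustment is retried
	 * later (used above when the qsd isn't ready), third one requests
	 * cancellation of the per-ID lock (used by qsd_req_completion()
	 * once usage and grant drop to zero) */
	void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer,
				 bool cancel);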
Example #8
/**
 * Acquire quota space from master.
 * There is at most one in-flight dqacq/dqrel request.
 *
 * \param env    - the environment passed by the caller
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0            - success
 * \retval -EDQUOT      - out of quota
 * \retval -EINPROGRESS - inform client to retry write/create
 * \retval -EBUSY       - already a quota request in flight
 * \retval -ve          - other appropriate errors
 */
static int qsd_acquire_remote(const struct lu_env *env,
			      struct lquota_entry *lqe)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct quota_body	*qbody = &qti->qti_body;
	struct qsd_instance	*qsd;
	struct qsd_qtype_info	*qqi;
	int			 rc;
	ENTRY;

	memset(qbody, 0, sizeof(*qbody));
	rc = qsd_ready(lqe, &qbody->qb_glb_lockh);
	if (rc)
		RETURN(rc);

	qqi = lqe2qqi(lqe);
	qsd = qqi->qqi_qsd;

	lqe_write_lock(lqe);

	/* is quota really enforced for this id? */
	if (!lqe->lqe_enforced) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "quota not enforced any more");
		RETURN(0);
	}

	/* fill qb_count & qb_flags */
	if (!qsd_calc_acquire(lqe, qbody)) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "No acquire required");
		RETURN(0);
	}

	/* check whether an acquire request completed recently */
	if (lqe->lqe_acq_rc != 0 &&
	    cfs_time_before_64(cfs_time_shift_64(-1), lqe->lqe_acq_time)) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "using cached return code %d", lqe->lqe_acq_rc);
		RETURN(lqe->lqe_acq_rc);
	}

	/* only 1 quota request in flight for a given ID is allowed */
	rc = qsd_request_enter(lqe);
	if (rc) {
		lqe_write_unlock(lqe);
		RETURN(rc);
	}

	lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
	lqe_write_unlock(lqe);

	/* hold a refcount until completion */
	lqe_getref(lqe);

	/* fill other quota body fields */
	qbody->qb_fid = qqi->qqi_fid;
	qbody->qb_id  = lqe->lqe_id;

	/* check whether we already own a valid lock for this ID */
	rc = qsd_id_lock_match(&qti->qti_lockh, &qbody->qb_lockh);
	if (rc) {
		struct lquota_lvb *lvb;

		OBD_ALLOC_PTR(lvb);
		if (lvb == NULL) {
			rc = -ENOMEM;
			qsd_req_completion(env, qqi, qbody, NULL,
					   &qti->qti_lockh, NULL, lqe, rc);
			RETURN(rc);
		}
		/* no lock found, should use intent */
		rc = qsd_intent_lock(env, qsd->qsd_exp, qbody, true,
				     IT_QUOTA_DQACQ, qsd_req_completion,
				     qqi, lvb, (void *)lqe);
	} else {
		/* lock found, should use regular dqacq */
		rc = qsd_send_dqacq(env, qsd->qsd_exp, qbody, true,
				    qsd_req_completion, qqi, &qti->qti_lockh,
				    lqe);
	}

	/* the completion function will be called by qsd_send_dqacq or
	 * qsd_intent_lock */
	RETURN(rc);
}
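
A hedged sketch of how a write path might map the return codes documented
above (the caller is not shown in these examples):

	rc = qsd_acquire_remote(env, lqe);
	switch (rc) {
	case 0:			/* space acquired, proceed */
		break;
	case -EBUSY:		/* request already in flight: wait, retry */
		break;
	case -EINPROGRESS:	/* returned to the client, which retries */
		break;
	case -EDQUOT:		/* out of quota: fail the write/create */
		break;
	default:		/* other errors are propagated as-is */
		break;
	}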
Example #9
/**
 * Callback function called when an acquire/release request sent to the master
 * is completed
 */
static void qsd_req_completion(const struct lu_env *env,
			       struct qsd_qtype_info *qqi,
			       struct quota_body *reqbody,
			       struct quota_body *repbody,
			       struct lustre_handle *lockh,
			       struct lquota_lvb *lvb,
			       void *arg, int ret)
{
	struct lquota_entry	*lqe = (struct lquota_entry *)arg;
	struct qsd_thread_info	*qti;
	int			 rc;
	bool			 adjust = false, cancel = false;
	ENTRY;

	LASSERT(qqi != NULL && lqe != NULL);

	/* the environment passed by ptlrpcd is mostly used by CLIO and
	 * doesn't have the DT tags set. */
	rc = lu_env_refill_by_tags((struct lu_env *)env, LCT_DT_THREAD, 0);
	if (rc) {
		LQUOTA_ERROR(lqe, "failed to refill environment %d", rc);
		lqe_write_lock(lqe);
		/* can't afford to adjust quota space with no suitable lu_env */
		GOTO(out_noadjust, rc);
	}
	qti = qsd_info(env);

	lqe_write_lock(lqe);
	LQUOTA_DEBUG(lqe, "DQACQ returned %d, flags:0x%x", ret,
		     reqbody->qb_flags);

	/* despite -EDQUOT & -EINPROGRESS errors, the master might still
	 * grant us back quota space to adjust the quota overrun */
	if (ret != 0 && ret != -EDQUOT && ret != -EINPROGRESS) {
		if (ret != -ETIMEDOUT && ret != -ENOTCONN &&
		   ret != -ESHUTDOWN && ret != -EAGAIN)
			/* print errors only if return code is unexpected */
			LQUOTA_ERROR(lqe, "DQACQ failed with %d, flags:0x%x",
				     ret, reqbody->qb_flags);
		GOTO(out, ret);
	}

	/* Set the lqe_lockh */
	if (lustre_handle_is_used(lockh) &&
	    !lustre_handle_equal(lockh, &lqe->lqe_lockh))
		lustre_handle_copy(&lqe->lqe_lockh, lockh);

	/* If the replied qb_count is zero, it means the master didn't process
	 * the DQACQ since the limit for this ID has been removed, so we
	 * should update neither the quota entry nor the slave index copy. */
	if (repbody != NULL && repbody->qb_count != 0) {
		LQUOTA_DEBUG(lqe, "DQACQ qb_count:"LPU64, repbody->qb_count);

		if (req_is_rel(reqbody->qb_flags)) {
			if (lqe->lqe_granted < repbody->qb_count) {
				LQUOTA_ERROR(lqe, "can't release more space "
					     "than owned "LPU64"<"LPU64,
					     lqe->lqe_granted,
					     repbody->qb_count);
				lqe->lqe_granted = 0;
			} else {
				lqe->lqe_granted -= repbody->qb_count;
			}
			/* Proactively cancel the per-ID lock when there is
			 * no usage or grant left; this avoids the master
			 * unnecessarily sending a glimpse to this slave on
			 * quota revocation */
			if (!lqe->lqe_pending_write && !lqe->lqe_granted &&
			    !lqe->lqe_waiting_write && !lqe->lqe_usage)
				cancel = true;
		} else {
			lqe->lqe_granted += repbody->qb_count;
		}
		qti->qti_rec.lqr_slv_rec.qsr_granted = lqe->lqe_granted;
		lqe_write_unlock(lqe);

		/* Update the slave index file in the dedicated thread. So far,
		 * we don't update the version of the slave index copy on
		 * DQACQ. No locking is necessary since nobody can change
		 * lqe->lqe_granted while lqe->lqe_pending_req > 0 */
		qsd_upd_schedule(qqi, lqe, &lqe->lqe_id, &qti->qti_rec, 0,
				 false);
		lqe_write_lock(lqe);
	}

	/* extract information from lvb */
	if (ret == 0 && lvb != NULL) {
		if (lvb->lvb_id_qunit != 0)
			qsd_set_qunit(lqe, lvb->lvb_id_qunit);
		qsd_set_edquot(lqe, !!(lvb->lvb_flags & LQUOTA_FL_EDQUOT));
	} else if (repbody != NULL && repbody->qb_qunit != 0) {
		qsd_set_qunit(lqe, repbody->qb_qunit);
	}

	/* turn off pre-acquire if it failed with -EDQUOT. This is done to
	 * avoid flooding the master with acquire requests. Pre-acquire will
	 * be turned on again as soon as qunit is modified */
	if (req_is_preacq(reqbody->qb_flags) && ret == -EDQUOT)
		lqe->lqe_nopreacq = true;
out:
	adjust = qsd_adjust_needed(lqe);
	if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
		lqe->lqe_acq_rc = ret;
		lqe->lqe_acq_time = cfs_time_current_64();
	}
out_noadjust:
	qsd_request_exit(lqe);
	lqe_write_unlock(lqe);

	/* release reference on per-ID lock */
	if (lustre_handle_is_used(lockh))
		ldlm_lock_decref(lockh, qsd_id_einfo.ei_mode);

	if (cancel) {
		qsd_adjust_schedule(lqe, false, true);
	} else if (adjust) {
		if (!ret || ret == -EDQUOT)
			qsd_adjust_schedule(lqe, false, false);
		else
			qsd_adjust_schedule(lqe, true, false);
	}
	lqe_putref(lqe);

	if (lvb)
		OBD_FREE_PTR(lvb);
	EXIT;
}
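
The callback pairs with qsd_request_enter() in the senders; a condensed
sketch of the single-request-in-flight accounting assumed throughout
these examples (qsd_request_enter()/qsd_request_exit() bodies are not
shown here):

	lqe_write_lock(lqe);
	rc = qsd_request_enter(lqe);	/* non-zero if already in flight */
	lqe_write_unlock(lqe);
	if (rc == 0) {
		/* ... send the request; on completion, the callback above
		 * calls qsd_request_exit(lqe) under lqe_write_lock() ... */
	}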