/*
 * Glimpse callback handler for global quota lock.
 *
 * Decodes the glimpse request, extracts the new global limits from the
 * glimpse descriptor and schedules a deferred update of the on-disk global
 * index copy (no disk I/O is allowed in glimpse callback context).
 *
 * \param lock - is the lock targeted by the glimpse
 * \param data - is a pointer to the glimpse ptlrpc request
 */
static int qsd_glb_glimpse_ast(struct ldlm_lock *lock, void *data)
{
	struct ptlrpc_request		*req = data;
	struct qsd_qtype_info		*qqi;
	struct ldlm_gl_lquota_desc	*desc;
	struct lquota_lvb		*lvb;
	struct lquota_glb_rec		 rec;
	int				 rc;
	ENTRY;

	/* unpack glimpse descriptor & prepare reply lvb buffer; lvb itself is
	 * not consumed here, only the descriptor is */
	rc = qsd_common_glimpse_ast(req, &desc, (void **)&lvb);
	if (rc)
		GOTO(out, rc);

	/* grab qqi (and a lu_ref) from the lock's AST data; can legitimately
	 * return NULL if the lock is being cleaned up concurrently */
	qqi = qsd_glb_ast_data_get(lock, false);
	if (qqi == NULL)
		/* valid race */
		GOTO(out, rc = -ELDLM_NO_LOCK_DATA);

	CDEBUG(D_QUOTA, "%s: glimpse on glb quota locks, id:"LPU64" ver:"LPU64
	       " hard:" LPU64" soft:"LPU64"\n",
	       qqi->qqi_qsd->qsd_svname,
	       desc->gl_id.qid_uid, desc->gl_ver, desc->gl_hardlimit,
	       desc->gl_softlimit);

	/* version 0 would make the deferred index update meaningless */
	if (desc->gl_ver == 0) {
		CERROR("%s: invalid global index version "LPU64"\n",
		       qqi->qqi_qsd->qsd_svname, desc->gl_ver);
		GOTO(out_qqi, rc = -EINVAL);
	}

	/* extract new hard & soft limits from the glimpse descriptor */
	rec.qbr_hardlimit = desc->gl_hardlimit;
	rec.qbr_softlimit = desc->gl_softlimit;
	rec.qbr_time      = desc->gl_time;
	rec.qbr_granted   = 0;

	/* We can't afford disk io in the context of glimpse callback handling
	 * thread, so the on-disk global limits update has to be deferred. */
	qsd_upd_schedule(qqi, NULL, &desc->gl_id, (union lquota_rec *)&rec,
			 desc->gl_ver, true);
	EXIT;
out_qqi:
	/* drop the reference taken by qsd_glb_ast_data_get() */
	lu_ref_del(&qqi->qqi_reference, "ast_data_get", lock);
	qqi_putref(qqi);
out:
	req->rq_status = rc;
	return rc;
}
/* * Glimpse callback handler for per-ID quota locks. * * \param lock - is the lock targeted by the glimpse * \param data - is a pointer to the glimpse ptlrpc request */ static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data) { struct ptlrpc_request *req = data; struct lquota_entry *lqe; struct qsd_instance *qsd; struct ldlm_gl_lquota_desc *desc; struct lquota_lvb *lvb; int rc; bool wakeup = false; ENTRY; rc = qsd_common_glimpse_ast(req, &desc, (void **)&lvb); if (rc) GOTO(out, rc); lqe = qsd_id_ast_data_get(lock, false); if (lqe == NULL) /* valid race */ GOTO(out, rc = -ELDLM_NO_LOCK_DATA); LQUOTA_DEBUG(lqe, "glimpse on quota locks, new qunit:"LPU64, desc->gl_qunit); qsd = lqe2qqi(lqe)->qqi_qsd; lqe_write_lock(lqe); lvb->lvb_id_rel = 0; if (desc->gl_qunit != 0 && desc->gl_qunit != lqe->lqe_qunit) { long long space; /* extract new qunit from glimpse request */ qsd_set_qunit(lqe, desc->gl_qunit); space = lqe->lqe_granted - lqe->lqe_pending_rel; space -= lqe->lqe_usage; space -= lqe->lqe_pending_write + lqe->lqe_waiting_write; space -= lqe->lqe_qunit; if (space > 0) { if (lqe->lqe_pending_req > 0) { LQUOTA_DEBUG(lqe, "request in flight, postpone " "release of "LPD64, space); lvb->lvb_id_may_rel = space; } else { lqe->lqe_pending_req++; /* release quota space in glimpse reply */ LQUOTA_DEBUG(lqe, "releasing "LPD64, space); lqe->lqe_granted -= space; lvb->lvb_id_rel = space; lqe_write_unlock(lqe); /* change the lqe_granted */ qsd_upd_schedule(lqe2qqi(lqe), lqe, &lqe->lqe_id, (union lquota_rec *)&lqe->lqe_granted, 0, false); lqe_write_lock(lqe); lqe->lqe_pending_req--; wakeup = true; } } } lqe->lqe_edquot = !!(desc->gl_flags & LQUOTA_FL_EDQUOT); lqe_write_unlock(lqe); if (wakeup) wake_up_all(&lqe->lqe_waiters); lqe_putref(lqe); out: req->rq_status = rc; RETURN(rc); }
/** * Callback function called when an acquire/release request sent to the master * is completed */ static void qsd_req_completion(const struct lu_env *env, struct qsd_qtype_info *qqi, struct quota_body *reqbody, struct quota_body *repbody, struct lustre_handle *lockh, struct lquota_lvb *lvb, void *arg, int ret) { struct lquota_entry *lqe = (struct lquota_entry *)arg; struct qsd_thread_info *qti; int rc; bool adjust = false, cancel = false; ENTRY; LASSERT(qqi != NULL && lqe != NULL); /* environment passed by ptlrpcd is mostly used by CLIO and hasn't the * DT tags set. */ rc = lu_env_refill_by_tags((struct lu_env *)env, LCT_DT_THREAD, 0); if (rc) { LQUOTA_ERROR(lqe, "failed to refill environmnent %d", rc); lqe_write_lock(lqe); /* can't afford to adjust quota space with no suitable lu_env */ GOTO(out_noadjust, rc); } qti = qsd_info(env); lqe_write_lock(lqe); LQUOTA_DEBUG(lqe, "DQACQ returned %d, flags:0x%x", ret, reqbody->qb_flags); /* despite -EDQUOT & -EINPROGRESS errors, the master might still * grant us back quota space to adjust quota overrun */ if (ret != 0 && ret != -EDQUOT && ret != -EINPROGRESS) { if (ret != -ETIMEDOUT && ret != -ENOTCONN && ret != -ESHUTDOWN && ret != -EAGAIN) /* print errors only if return code is unexpected */ LQUOTA_ERROR(lqe, "DQACQ failed with %d, flags:0x%x", ret, reqbody->qb_flags); GOTO(out, ret); } /* Set the lqe_lockh */ if (lustre_handle_is_used(lockh) && !lustre_handle_equal(lockh, &lqe->lqe_lockh)) lustre_handle_copy(&lqe->lqe_lockh, lockh); /* If the replied qb_count is zero, it means master didn't process * the DQACQ since the limit for this ID has been removed, so we * should not update quota entry & slave index copy neither. 
*/ if (repbody != NULL && repbody->qb_count != 0) { LQUOTA_DEBUG(lqe, "DQACQ qb_count:"LPU64, repbody->qb_count); if (req_is_rel(reqbody->qb_flags)) { if (lqe->lqe_granted < repbody->qb_count) { LQUOTA_ERROR(lqe, "can't release more space " "than owned "LPU64"<"LPU64, lqe->lqe_granted, repbody->qb_count); lqe->lqe_granted = 0; } else { lqe->lqe_granted -= repbody->qb_count; } /* Cancel the per-ID lock initiatively when there * isn't any usage & grant, which can avoid master * sending glimpse unnecessarily to this slave on * quota revoking */ if (!lqe->lqe_pending_write && !lqe->lqe_granted && !lqe->lqe_waiting_write && !lqe->lqe_usage) cancel = true; } else { lqe->lqe_granted += repbody->qb_count; } qti->qti_rec.lqr_slv_rec.qsr_granted = lqe->lqe_granted; lqe_write_unlock(lqe); /* Update the slave index file in the dedicated thread. So far, * We don't update the version of slave index copy on DQACQ. * No locking is necessary since nobody can change * lqe->lqe_granted while lqe->lqe_pending_req > 0 */ qsd_upd_schedule(qqi, lqe, &lqe->lqe_id, &qti->qti_rec, 0, false); lqe_write_lock(lqe); } /* extract information from lvb */ if (ret == 0 && lvb != 0) { if (lvb->lvb_id_qunit != 0) qsd_set_qunit(lqe, lvb->lvb_id_qunit); qsd_set_edquot(lqe, !!(lvb->lvb_flags & LQUOTA_FL_EDQUOT)); } else if (repbody != NULL && repbody->qb_qunit != 0) { qsd_set_qunit(lqe, repbody->qb_qunit); } /* turn off pre-acquire if it failed with -EDQUOT. This is done to avoid * flooding the master with acquire request. 
Pre-acquire will be turned * on again as soon as qunit is modified */ if (req_is_preacq(reqbody->qb_flags) && ret == -EDQUOT) lqe->lqe_nopreacq = true; out: adjust = qsd_adjust_needed(lqe); if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) { lqe->lqe_acq_rc = ret; lqe->lqe_acq_time = cfs_time_current_64(); } out_noadjust: qsd_request_exit(lqe); lqe_write_unlock(lqe); /* release reference on per-ID lock */ if (lustre_handle_is_used(lockh)) ldlm_lock_decref(lockh, qsd_id_einfo.ei_mode); if (cancel) { qsd_adjust_schedule(lqe, false, true); } else if (adjust) { if (!ret || ret == -EDQUOT) qsd_adjust_schedule(lqe, false, false); else qsd_adjust_schedule(lqe, true, false); } lqe_putref(lqe); if (lvb) OBD_FREE_PTR(lvb); EXIT; }