/*
 * Look up the current disk space consumed by a given identifier and refresh
 * lqe_usage accordingly.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the quota entry associated with the identifier
 */
int qsd_refresh_usage(const struct lu_env *env, struct lquota_entry *lqe)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct lquota_acct_rec *rec = &qti->qti_acct_rec;
        struct qsd_qtype_info *qqi = lqe2qqi(lqe);
        int rc = 0;
        ENTRY;

        LASSERT(qqi->qqi_acct_obj);

        /* read disk usage */
        rc = lquota_disk_read(env, qqi->qqi_acct_obj, &lqe->lqe_id,
                              (struct dt_rec *)rec);
        switch (rc) {
        case -ENOENT:
                lqe->lqe_usage = 0;
                rc = 0;
                break;
        case 0:
                if (qqi->qqi_qsd->qsd_is_md)
                        lqe->lqe_usage = rec->ispace;
                else
                        lqe->lqe_usage = toqb(rec->bspace);
                break;
        default:
                LQUOTA_ERROR(lqe, "failed to read disk usage, rc:%d", rc);
                RETURN(rc);
        }

        LQUOTA_DEBUG(lqe, "disk usage: "LPU64, lqe->lqe_usage);
        RETURN(0);
}
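/*
 * Usage-unit note (an illustrative sketch, not part of the build): on-disk
 * accounting records store block usage (bspace) in bytes, while lqe_usage is
 * expressed in quota blocks. Assuming toqb() is the usual round-up conversion
 * to 1KiB quota blocks (QIF_DQBLKSIZE):
 *
 *      __u64 bytes  = 4097;
 *      __u64 blocks = toqb(bytes);     // (4097 + 1023) >> 10 == 5
 *
 * Inode usage (ispace) on an MDT is used as-is, with no conversion.
 */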
/*
 * Cancel the per-ID quota lock proactively when there is no more pending
 * write, usage or grant left for this ID.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the quota entry associated with the lock to cancel
 */
int qsd_id_lock_cancel(const struct lu_env *env, struct lquota_entry *lqe)
{
        struct qsd_thread_info *qti = qsd_info(env);
        int rc;
        ENTRY;

        lqe_write_lock(lqe);
        if (lqe->lqe_pending_write || lqe->lqe_waiting_write ||
            lqe->lqe_usage || lqe->lqe_granted) {
                lqe_write_unlock(lqe);
                RETURN(0);
        }

        lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
        if (lustre_handle_is_used(&qti->qti_lockh)) {
                memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
                qsd_set_qunit(lqe, 0);
                lqe->lqe_edquot = false;
        }
        lqe_write_unlock(lqe);

        rc = qsd_id_lock_match(&qti->qti_lockh, NULL);
        if (rc)
                RETURN(rc);

        ldlm_lock_decref_and_cancel(&qti->qti_lockh, qsd_id_einfo.ei_mode);
        RETURN(0);
}
/*
 * Initialize on-disk structures in order to manage quota enforcement for
 * the target associated with the qsd instance \qsd and start the
 * reintegration procedure for each quota type as soon as possible.
 * The last step of the reintegration will be completed once qsd_start() is
 * called, at which point the space reconciliation with the master will be
 * executed.
 * This function must be called when the server stack is fully configured,
 * typically when ->ldo_prepare is called across the stack.
 *
 * \param env - the environment passed by the caller
 * \param qsd - is qsd_instance to prepare
 *
 * \retval - 0 on success, appropriate error on failure
 */
int qsd_prepare(const struct lu_env *env, struct qsd_instance *qsd)
{
        struct qsd_thread_info *qti = qsd_info(env);
        int qtype, rc = 0;
        ENTRY;

        if (unlikely(qsd == NULL))
                RETURN(0);

        read_lock(&qsd->qsd_lock);
        if (qsd->qsd_prepared) {
                CERROR("%s: qsd instance already prepared\n", qsd->qsd_svname);
                rc = -EALREADY;
        }
        read_unlock(&qsd->qsd_lock);
        if (rc)
                RETURN(rc);

        /* Record whether this qsd instance is managing quota enforcement for
         * a MDT (i.e. inode quota) or OST (block quota) */
        if (lu_device_is_md(qsd->qsd_dev->dd_lu_dev.ld_site->ls_top_dev)) {
                qsd->qsd_is_md = true;
                qsd->qsd_sync_threshold = LQUOTA_LEAST_QUNIT(LQUOTA_RES_MD);
        } else {
                qsd->qsd_sync_threshold = LQUOTA_LEAST_QUNIT(LQUOTA_RES_DT);
        }

        /* look-up on-disk directory for the quota slave */
        qsd->qsd_root = lquota_disk_dir_find_create(env, qsd->qsd_dev, NULL,
                                                    QSD_DIR);
        if (IS_ERR(qsd->qsd_root)) {
                rc = PTR_ERR(qsd->qsd_root);
                qsd->qsd_root = NULL;
                CERROR("%s: failed to create quota slave root dir (%d)\n",
                       qsd->qsd_svname, rc);
                RETURN(rc);
        }

        /* initialize per-quota type data */
        for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
                rc = qsd_qtype_init(env, qsd, qtype);
                if (rc)
                        RETURN(rc);
        }

        /* pools successfully set up, mark the qsd as prepared */
        write_lock(&qsd->qsd_lock);
        qsd->qsd_prepared = true;
        write_unlock(&qsd->qsd_lock);

        /* start reintegration thread for each type, if required */
        for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
                struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];

                if (qsd_type_enabled(qsd, qtype) && qsd->qsd_acct_failed) {
                        LCONSOLE_ERROR("%s: can't enable quota enforcement "
                                       "since space accounting isn't "
                                       "functional. Please run tunefs.lustre "
                                       "--quota on an unmounted filesystem if "
                                       "not done already\n", qsd->qsd_svname);
                        break;
                }

                rc = qsd_start_reint_thread(qqi);
                if (rc) {
                        CERROR("%s: failed to start reint thread for type %s "
                               "(%d)\n", qsd->qsd_svname, QTYPE_NAME(qtype),
                               rc);
                        RETURN(rc);
                }
        }

        /* start writeback thread */
        rc = qsd_start_upd_thread(qsd);
        if (rc) {
                CERROR("%s: failed to start writeback thread (%d)\n",
                       qsd->qsd_svname, rc);
                RETURN(rc);
        }

        /* generate osp name */
        rc = tgt_name2lwp_name(qsd->qsd_svname, qti->qti_buf, MTI_NAME_MAXLEN,
                               0);
        if (rc) {
                CERROR("%s: failed to generate ospname (%d)\n",
                       qsd->qsd_svname, rc);
                RETURN(rc);
        }

        /* the connection callback will start the reintegration procedure if
         * quota is enabled */
        rc = lustre_register_lwp_item(qti->qti_buf, &qsd->qsd_exp,
                                      qsd_conn_callback, (void *)qsd);
        if (rc) {
                CERROR("%s: failed to get connection to master (%d)\n",
                       qsd->qsd_svname, rc);
                RETURN(rc);
        }

        RETURN(0);
}
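/*
 * Caller-side sketch: qsd_prepare() is meant to be invoked from the OSD's
 * ->ldo_prepare method once the server stack is configured. A minimal,
 * hypothetical example (the osd_device layout and the od_quota_slave field
 * name are assumptions for illustration only):
 *
 *      static int osd_prepare(const struct lu_env *env,
 *                             struct lu_device *pdev, struct lu_device *dev)
 *      {
 *              struct osd_device *osd = osd_dev(dev);
 *
 *              // qsd instance allocated earlier via qsd_init()
 *              return qsd_prepare(env, osd->od_quota_slave);
 *      }
 */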
/*
 * Create a new qsd_instance to be associated with backend osd device
 * identified by \dev.
 *
 * \param env      - the environment passed by the caller
 * \param svname   - is the service name of the OSD device creating this
 *                   instance
 * \param dev      - is the dt_device where to store quota index files
 * \param osd_proc - is the procfs parent directory where to create procfs
 *                   files related to this new qsd instance
 *
 * \retval - pointer to new qsd_instance associated with dev \dev on success,
 *           appropriate error on failure
 */
struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
                              struct dt_device *dev,
                              cfs_proc_dir_entry_t *osd_proc)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct qsd_instance *qsd;
        int rc, type, idx;
        ENTRY;

        /* only configure qsd for MDT & OST */
        type = server_name2index(svname, &idx, NULL);
        if (type != LDD_F_SV_TYPE_MDT && type != LDD_F_SV_TYPE_OST)
                RETURN(NULL);

        /* allocate qsd instance */
        OBD_ALLOC_PTR(qsd);
        if (qsd == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        /* generic initializations */
        rwlock_init(&qsd->qsd_lock);
        CFS_INIT_LIST_HEAD(&qsd->qsd_link);
        thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
        init_waitqueue_head(&qsd->qsd_upd_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
        spin_lock_init(&qsd->qsd_adjust_lock);
        CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
        qsd->qsd_prepared = false;
        qsd->qsd_started = false;

        /* copy service name */
        if (strlcpy(qsd->qsd_svname, svname, sizeof(qsd->qsd_svname))
            >= sizeof(qsd->qsd_svname))
                GOTO(out, rc = -E2BIG);

        /* grab reference on osd device */
        lu_device_get(&dev->dd_lu_dev);
        lu_ref_add(&dev->dd_lu_dev.ld_reference, "qsd", qsd);
        qsd->qsd_dev = dev;

        /* we only support pool ID 0 (default data or metadata pool) for the
         * time being. A different pool ID could be assigned to this target via
         * the configuration log in the future */
        qsd->qsd_pool_id = 0;

        /* get fsname from svname */
        rc = server_name2fsname(svname, qti->qti_buf, NULL);
        if (rc) {
                CERROR("%s: failed to extract filesystem name\n", svname);
                GOTO(out, rc);
        }

        /* look up quota settings for the filesystem the target belongs to */
        qsd->qsd_fsinfo = qsd_get_fsinfo(qti->qti_buf, 1);
        if (qsd->qsd_fsinfo == NULL) {
                CERROR("%s: failed to locate filesystem information\n",
                       svname);
                GOTO(out, rc = -EINVAL);
        }

        /* add in the list of lquota_fsinfo */
        mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
        list_add_tail(&qsd->qsd_link, &qsd->qsd_fsinfo->qfs_qsd_list);
        mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);

        /* register procfs directory */
        qsd->qsd_proc = lprocfs_seq_register(QSD_DIR, osd_proc,
                                             lprocfs_quota_qsd_vars, qsd);
        if (IS_ERR(qsd->qsd_proc)) {
                rc = PTR_ERR(qsd->qsd_proc);
                qsd->qsd_proc = NULL;
                CERROR("%s: failed to create quota slave proc entry (%d)\n",
                       svname, rc);
                GOTO(out, rc);
        }
        EXIT;
out:
        if (rc) {
                qsd_fini(env, qsd);
                return ERR_PTR(rc);
        }
        RETURN(qsd);
}
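/*
 * Lifecycle sketch for a qsd instance (assuming qsd_start() and qsd_fini()
 * follow the same (env, qsd) calling convention as qsd_prepare() above).
 * Note that qsd_init() returns NULL for targets that are neither MDT nor
 * OST, so callers must check for both NULL and IS_ERR():
 *
 *      qsd = qsd_init(env, svname, dev, osd_proc);     // at setup time
 *      if (qsd == NULL)
 *              ...;                    // quota enforcement not configured
 *      else if (IS_ERR(qsd))
 *              return PTR_ERR(qsd);
 *      rc = qsd_prepare(env, qsd);     // once the stack is configured
 *      rc = qsd_start(env, qsd);       // when recovery is completed
 *      ...
 *      qsd_fini(env, qsd);             // at umount time
 */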
/*
 * Update a slave quota entry. This is done by reading the enforcement status
 * from the copy of the global index and then reading from the slave index
 * copy how much space the slave currently owns for this user.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the quota entry to refresh
 * \param arg - is the pointer to the qsd_qtype_info structure
 */
static int qsd_lqe_read(const struct lu_env *env, struct lquota_entry *lqe,
                        void *arg)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct qsd_qtype_info *qqi = (struct qsd_qtype_info *)arg;
        int rc;

        LASSERT(!lqe_is_master(lqe));

        /* read record from global index copy to know whether quota is
         * enforced for this user */
        rc = lquota_disk_read(env, qqi->qqi_glb_obj, &lqe->lqe_id,
                              (struct dt_rec *)&qti->qti_glb_rec);
        switch (rc) {
        case -ENOENT:
                /* no such entry, assume quota isn't enforced for this user */
                lqe->lqe_enforced = false;
                break;
        case 0:
                if (qti->qti_glb_rec.qbr_hardlimit == 0 &&
                    qti->qti_glb_rec.qbr_softlimit == 0)
                        /* quota isn't enforced for this user */
                        lqe->lqe_enforced = false;
                else
                        lqe->lqe_enforced = true;
                break;
        default:
                LQUOTA_ERROR(lqe, "failed to read quota entry from global "
                             "index copy, rc:%d", rc);
                return rc;
        }

        /* read record from slave index copy to find out how much space is
         * currently owned by this slave */
        rc = lquota_disk_read(env, qqi->qqi_slv_obj, &lqe->lqe_id,
                              (struct dt_rec *)&qti->qti_slv_rec);
        switch (rc) {
        case -ENOENT:
                lqe->lqe_granted = 0;
                break;
        case 0:
                lqe->lqe_granted = qti->qti_slv_rec.qsr_granted;
                break;
        default:
                LQUOTA_ERROR(lqe, "failed to read quota entry from slave "
                             "index copy, rc:%d", rc);
                return rc;
        }

        /* don't know what the qunit value is yet */
        qsd_set_qunit(lqe, 0);

        /* read current disk usage from disk */
        rc = qsd_refresh_usage(env, lqe);
        if (rc)
                return rc;

        LQUOTA_DEBUG(lqe, "successfully read from disk");
        return 0;
}
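/*
 * qsd_lqe_read() is not called directly; it is expected to be plugged into
 * the lquota entry operation vector so that a cache miss in the lqe hash is
 * populated from disk. A sketch of such a registration (the member names
 * follow the lquota_entry_operations convention and are assumptions here):
 *
 *      struct lquota_entry_operations qsd_lqe_ops = {
 *              .lqe_init = qsd_lqe_init,
 *              .lqe_read = qsd_lqe_read,
 *      };
 */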
/*
 * Get intent per-ID lock or global-index lock from master.
 *
 * \param env        - the environment passed by the caller
 * \param exp        - is the export to use to send the intent RPC
 * \param qbody      - quota body to be packed in request
 * \param sync       - synchronous or asynchronous (pre-acquire)
 * \param it_op      - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi        - is the qsd_qtype_info structure to pass to the
 *                     completion function
 * \param lvb        - is the lvb associated with the lock and returned by
 *                     the server
 * \param arg        - is an opaque argument passed to the completion callback
 *
 * \retval 0   - success
 * \retval -ve - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
                    struct quota_body *qbody, bool sync, int it_op,
                    qsd_req_completion_t completion,
                    struct qsd_qtype_info *qqi, struct lquota_lvb *lvb,
                    void *arg)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct ptlrpc_request *req;
        struct qsd_async_args *aa = NULL;
        struct ldlm_intent *lit;
        struct quota_body *req_qbody;
        __u64 flags = LDLM_FL_HAS_INTENT;
        int rc;
        ENTRY;

        LASSERT(exp != NULL);
        LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

        memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_LDLM_INTENT_QUOTA);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        req->rq_no_retry_einprogress = 1;
        rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        lit->opc = (__u64)it_op;

        req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        *req_qbody = *qbody;

        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             sizeof(*lvb));
        ptlrpc_request_set_replen(req);

        switch (it_op) {
        case IT_QUOTA_CONN:
                /* build resource name associated with global index */
                fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

                /* copy einfo template and fill ei_cbdata with qqi pointer */
                memcpy(&qti->qti_einfo, &qsd_glb_einfo,
                       sizeof(qti->qti_einfo));
                qti->qti_einfo.ei_cbdata = qqi;

                /* don't cancel global lock on memory pressure */
                flags |= LDLM_FL_NO_LRU;
                break;
        case IT_QUOTA_DQACQ:
                /* build resource name associated with per-ID quota lock */
                fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
                                         &qti->qti_resid);

                /* copy einfo template and fill ei_cbdata with lqe pointer */
                memcpy(&qti->qti_einfo, &qsd_id_einfo,
                       sizeof(qti->qti_einfo));
                qti->qti_einfo.ei_cbdata = arg;
                break;
        default:
                LASSERTF(0, "invalid it_op %d", it_op);
        }

        /* build lock enqueue request */
        rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid,
                              NULL, &flags, (void *)lvb, sizeof(*lvb),
                              LVB_T_LQUOTA, &qti->qti_lockh, 1);
        if (rc < 0) {
                ptlrpc_req_finished(req);
                GOTO(out, rc);
        }

        /* grab reference on backend structure for the new lock */
        switch (it_op) {
        case IT_QUOTA_CONN:
                /* grab reference on qqi for new lock */
#ifdef USE_LU_REF
        {
                struct ldlm_lock *lock;

                lock = ldlm_handle2lock(&qti->qti_lockh);
                if (lock == NULL) {
                        ptlrpc_req_finished(req);
                        GOTO(out, rc = -ENOLCK);
                }
                lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
                LDLM_LOCK_PUT(lock);
        }
#endif /* USE_LU_REF */
                qqi_getref(qqi);
                break;
        case IT_QUOTA_DQACQ:
                /* grab reference on lqe for new lock */
                lqe_getref((struct lquota_entry *)arg);
                /* all acquire/release requests are sent with the no_resend
                 * and no_delay flags */
                req->rq_no_resend = req->rq_no_delay = 1;
                break;
        default:
                break;
        }

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_exp = exp;
        aa->aa_qqi = qqi;
        aa->aa_arg = arg;
        aa->aa_lvb = lvb;
        aa->aa_completion = completion;
        lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

        if (sync) {
                /* send lock enqueue request and wait for completion */
                rc = ptlrpc_queue_wait(req);
                rc = qsd_intent_interpret(env, req, aa, rc);
                ptlrpc_req_finished(req);
        } else {
                /* queue lock request and return */
                req->rq_interpret_reply = qsd_intent_interpret;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
        }

        RETURN(rc);
out:
        completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
        return rc;
}
/**
 * Adjust quota space (by acquiring or releasing) held by the quota slave.
 * This function is called after each quota request completion and during
 * reintegration in order to report usage or re-acquire quota locks.
 * Space adjustment is aborted if there is already a quota request in flight
 * for this ID.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the qid entry to be processed
 *
 * \retval 0 on success, appropriate errors on failure
 */
int qsd_adjust(const struct lu_env *env, struct lquota_entry *lqe)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct quota_body *qbody = &qti->qti_body;
        struct qsd_instance *qsd;
        struct qsd_qtype_info *qqi;
        int rc;
        bool intent = false;
        ENTRY;

        memset(qbody, 0, sizeof(*qbody));
        rc = qsd_ready(lqe, &qbody->qb_glb_lockh);
        if (rc) {
                /* add to adjust list again to trigger adjustment later when
                 * slave is ready */
                LQUOTA_DEBUG(lqe, "delaying adjustment since qsd isn't ready");
                qsd_adjust_schedule(lqe, true, false);
                RETURN(0);
        }

        qqi = lqe2qqi(lqe);
        qsd = qqi->qqi_qsd;

        lqe_write_lock(lqe);

        /* fill qb_count & qb_flags */
        if (!qsd_calc_adjust(lqe, qbody)) {
                lqe_write_unlock(lqe);
                LQUOTA_DEBUG(lqe, "no adjustment required");
                RETURN(0);
        }

        /* only 1 quota request in flight for a given ID is allowed */
        rc = qsd_request_enter(lqe);
        if (rc) {
                /* already a request in flight, space adjustment will be run
                 * again on request completion */
                lqe_write_unlock(lqe);
                RETURN(0);
        }

        if (req_is_rel(qbody->qb_flags))
                lqe->lqe_pending_rel = qbody->qb_count;
        lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
        lqe_write_unlock(lqe);

        /* hold a refcount until completion */
        lqe_getref(lqe);

        /* fill other quota body fields */
        qbody->qb_fid = qqi->qqi_fid;
        qbody->qb_id = lqe->lqe_id;

        if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
                /* check whether we own a valid lock for this ID */
                rc = qsd_id_lock_match(&qti->qti_lockh, &qbody->qb_lockh);
                if (rc) {
                        memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));
                        if (req_is_preacq(qbody->qb_flags)) {
                                if (req_has_rep(qbody->qb_flags))
                                        /* still want to report usage */
                                        qbody->qb_flags =
                                                        QUOTA_DQACQ_FL_REPORT;
                                else
                                        /* no pre-acquire if no per-ID lock */
                                        GOTO(out, rc = -ENOLCK);
                        } else {
                                /* no lock found, should use intent */
                                intent = true;
                        }
                } else if (req_is_acq(qbody->qb_flags) &&
                           qbody->qb_count == 0) {
                        /* found cached lock, no need to acquire */
                        GOTO(out, rc = 0);
                }
        } else {
                /* release and report don't need a per-ID lock */
                memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));
        }

        if (!intent) {
                rc = qsd_send_dqacq(env, qsd->qsd_exp, qbody, false,
                                    qsd_req_completion, qqi, &qti->qti_lockh,
                                    lqe);
        } else {
                struct lquota_lvb *lvb;

                OBD_ALLOC_PTR(lvb);
                if (lvb == NULL)
                        GOTO(out, rc = -ENOMEM);

                rc = qsd_intent_lock(env, qsd->qsd_exp, qbody, false,
                                     IT_QUOTA_DQACQ, qsd_req_completion,
                                     qqi, lvb, (void *)lqe);
        }

        /* the completion function will be called by qsd_send_dqacq or
         * qsd_intent_lock */
        RETURN(rc);
out:
        qsd_req_completion(env, qqi, qbody, NULL, &qti->qti_lockh, NULL, lqe,
                           rc);
        return rc;
}
/**
 * Acquire quota space from master.
 * There is at most one in-flight dqacq/dqrel request for a given ID.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the qid entry to be processed
 *
 * \retval 0            - success
 * \retval -EDQUOT      - out of quota
 * \retval -EINPROGRESS - inform client to retry write/create
 * \retval -EBUSY       - already a quota request in flight
 * \retval -ve          - other appropriate errors
 */
static int qsd_acquire_remote(const struct lu_env *env,
                              struct lquota_entry *lqe)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct quota_body *qbody = &qti->qti_body;
        struct qsd_instance *qsd;
        struct qsd_qtype_info *qqi;
        int rc;
        ENTRY;

        memset(qbody, 0, sizeof(*qbody));
        rc = qsd_ready(lqe, &qbody->qb_glb_lockh);
        if (rc)
                RETURN(rc);

        qqi = lqe2qqi(lqe);
        qsd = qqi->qqi_qsd;

        lqe_write_lock(lqe);

        /* is quota really enforced for this id? */
        if (!lqe->lqe_enforced) {
                lqe_write_unlock(lqe);
                LQUOTA_DEBUG(lqe, "quota not enforced any more");
                RETURN(0);
        }

        /* fill qb_count & qb_flags */
        if (!qsd_calc_acquire(lqe, qbody)) {
                lqe_write_unlock(lqe);
                LQUOTA_DEBUG(lqe, "no acquire required");
                RETURN(0);
        }

        /* check whether an acquire request completed recently */
        if (lqe->lqe_acq_rc != 0 &&
            cfs_time_before_64(cfs_time_shift_64(-1), lqe->lqe_acq_time)) {
                lqe_write_unlock(lqe);
                LQUOTA_DEBUG(lqe, "using cached return code %d",
                             lqe->lqe_acq_rc);
                RETURN(lqe->lqe_acq_rc);
        }

        /* only 1 quota request in flight for a given ID is allowed */
        rc = qsd_request_enter(lqe);
        if (rc) {
                lqe_write_unlock(lqe);
                RETURN(rc);
        }

        lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
        lqe_write_unlock(lqe);

        /* hold a refcount until completion */
        lqe_getref(lqe);

        /* fill other quota body fields */
        qbody->qb_fid = qqi->qqi_fid;
        qbody->qb_id = lqe->lqe_id;

        /* check whether we already own a valid lock for this ID */
        rc = qsd_id_lock_match(&qti->qti_lockh, &qbody->qb_lockh);
        if (rc) {
                struct lquota_lvb *lvb;

                OBD_ALLOC_PTR(lvb);
                if (lvb == NULL) {
                        rc = -ENOMEM;
                        qsd_req_completion(env, qqi, qbody, NULL,
                                           &qti->qti_lockh, NULL, lqe, rc);
                        RETURN(rc);
                }

                /* no lock found, should use intent */
                rc = qsd_intent_lock(env, qsd->qsd_exp, qbody, true,
                                     IT_QUOTA_DQACQ, qsd_req_completion,
                                     qqi, lvb, (void *)lqe);
        } else {
                /* lock found, should use regular dqacq */
                rc = qsd_send_dqacq(env, qsd->qsd_exp, qbody, true,
                                    qsd_req_completion, qqi, &qti->qti_lockh,
                                    lqe);
        }

        /* the completion function will be called by qsd_send_dqacq or
         * qsd_intent_lock */
        RETURN(rc);
}
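/*
 * Caller-side sketch: -EBUSY only means another dqacq/dqrel request is
 * already in flight for this ID, so callers are expected to wait for its
 * completion and retry. A hypothetical retry loop (the wait primitive is an
 * illustrative assumption, not an API defined in this file):
 *
 *      do {
 *              rc = qsd_acquire_remote(env, lqe);
 *              if (rc == -EBUSY)
 *                      wait_for_lqe_request_completion(lqe);
 *      } while (rc == -EBUSY);
 */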
/**
 * Callback function called when an acquire/release request sent to the master
 * is completed
 */
static void qsd_req_completion(const struct lu_env *env,
                               struct qsd_qtype_info *qqi,
                               struct quota_body *reqbody,
                               struct quota_body *repbody,
                               struct lustre_handle *lockh,
                               struct lquota_lvb *lvb,
                               void *arg, int ret)
{
        struct lquota_entry *lqe = (struct lquota_entry *)arg;
        struct qsd_thread_info *qti;
        int rc;
        bool adjust = false, cancel = false;
        ENTRY;

        LASSERT(qqi != NULL && lqe != NULL);

        /* environment passed by ptlrpcd is mostly used by CLIO and doesn't
         * have the DT tags set. */
        rc = lu_env_refill_by_tags((struct lu_env *)env, LCT_DT_THREAD, 0);
        if (rc) {
                LQUOTA_ERROR(lqe, "failed to refill environment %d", rc);
                lqe_write_lock(lqe);
                /* can't afford to adjust quota space with no suitable lu_env */
                GOTO(out_noadjust, rc);
        }
        qti = qsd_info(env);

        lqe_write_lock(lqe);
        LQUOTA_DEBUG(lqe, "DQACQ returned %d, flags:0x%x", ret,
                     reqbody->qb_flags);

        /* despite -EDQUOT & -EINPROGRESS errors, the master might still
         * grant us back quota space to adjust quota overrun */
        if (ret != 0 && ret != -EDQUOT && ret != -EINPROGRESS) {
                if (ret != -ETIMEDOUT && ret != -ENOTCONN &&
                    ret != -ESHUTDOWN && ret != -EAGAIN)
                        /* print errors only if return code is unexpected */
                        LQUOTA_ERROR(lqe, "DQACQ failed with %d, flags:0x%x",
                                     ret, reqbody->qb_flags);
                GOTO(out, ret);
        }

        /* Set the lqe_lockh */
        if (lustre_handle_is_used(lockh) &&
            !lustre_handle_equal(lockh, &lqe->lqe_lockh))
                lustre_handle_copy(&lqe->lqe_lockh, lockh);

        /* If the replied qb_count is zero, it means the master didn't process
         * the DQACQ since the limit for this ID has been removed, so we
         * should not update the quota entry & slave index copy either. */
        if (repbody != NULL && repbody->qb_count != 0) {
                LQUOTA_DEBUG(lqe, "DQACQ qb_count:"LPU64, repbody->qb_count);

                if (req_is_rel(reqbody->qb_flags)) {
                        if (lqe->lqe_granted < repbody->qb_count) {
                                LQUOTA_ERROR(lqe, "can't release more space "
                                             "than owned "LPU64"<"LPU64,
                                             lqe->lqe_granted,
                                             repbody->qb_count);
                                lqe->lqe_granted = 0;
                        } else {
                                lqe->lqe_granted -= repbody->qb_count;
                        }
                        /* Cancel the per-ID lock proactively when there
                         * isn't any usage & grant, which can avoid master
                         * sending glimpse unnecessarily to this slave on
                         * quota revoking */
                        if (!lqe->lqe_pending_write && !lqe->lqe_granted &&
                            !lqe->lqe_waiting_write && !lqe->lqe_usage)
                                cancel = true;
                } else {
                        lqe->lqe_granted += repbody->qb_count;
                }
                qti->qti_rec.lqr_slv_rec.qsr_granted = lqe->lqe_granted;
                lqe_write_unlock(lqe);

                /* Update the slave index file in the dedicated thread. So far,
                 * we don't update the version of slave index copy on DQACQ.
                 * No locking is necessary since nobody can change
                 * lqe->lqe_granted while lqe->lqe_pending_req > 0 */
                qsd_upd_schedule(qqi, lqe, &lqe->lqe_id, &qti->qti_rec, 0,
                                 false);
                lqe_write_lock(lqe);
        }

        /* extract information from lvb */
        if (ret == 0 && lvb != NULL) {
                if (lvb->lvb_id_qunit != 0)
                        qsd_set_qunit(lqe, lvb->lvb_id_qunit);
                qsd_set_edquot(lqe, !!(lvb->lvb_flags & LQUOTA_FL_EDQUOT));
        } else if (repbody != NULL && repbody->qb_qunit != 0) {
                qsd_set_qunit(lqe, repbody->qb_qunit);
        }

        /* turn off pre-acquire if it failed with -EDQUOT. This is done to
         * avoid flooding the master with acquire requests. Pre-acquire will
         * be turned on again as soon as qunit is modified */
        if (req_is_preacq(reqbody->qb_flags) && ret == -EDQUOT)
                lqe->lqe_nopreacq = true;
out:
        adjust = qsd_adjust_needed(lqe);

        if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
                lqe->lqe_acq_rc = ret;
                lqe->lqe_acq_time = cfs_time_current_64();
        }
out_noadjust:
        qsd_request_exit(lqe);
        lqe_write_unlock(lqe);

        /* release reference on per-ID lock */
        if (lustre_handle_is_used(lockh))
                ldlm_lock_decref(lockh, qsd_id_einfo.ei_mode);

        if (cancel) {
                qsd_adjust_schedule(lqe, false, true);
        } else if (adjust) {
                if (!ret || ret == -EDQUOT)
                        qsd_adjust_schedule(lqe, false, false);
                else
                        qsd_adjust_schedule(lqe, true, false);
        }
        lqe_putref(lqe);

        if (lvb)
                OBD_FREE_PTR(lvb);
        EXIT;
}