/** * Check whether a slave already own a ldlm lock for the quota identifier \qid. * * \param lockh - is the local lock handle from lquota entry. * \param rlockh - is the remote lock handle of the matched lock, if any. * * \retval 0 : on successful look up and \lockh contains the lock handle. * \retval -ENOENT: no lock found */ int qsd_id_lock_match(struct lustre_handle *lockh, struct lustre_handle *rlockh) { struct ldlm_lock *lock; int rc; ENTRY; LASSERT(lockh); if (!lustre_handle_is_used(lockh)) RETURN(-ENOENT); rc = ldlm_lock_addref_try(lockh, qsd_id_einfo.ei_mode); if (rc) RETURN(-ENOENT); LASSERT(lustre_handle_is_used(lockh)); ldlm_lock_dump_handle(D_QUOTA, lockh); if (rlockh == NULL) /* caller not interested in remote handle */ RETURN(0); /* look up lock associated with local handle and extract remote handle * to be packed in quota request */ lock = ldlm_handle2lock(lockh); LASSERT(lock != NULL); lustre_handle_copy(rlockh, &lock->l_remote_handle); LDLM_LOCK_PUT(lock); RETURN(0); }
/**
 * Voluntarily cancel the per-ID quota lock owned for \a lqe, but only when
 * the entry is completely idle (no pending/waiting writes, no usage, no
 * granted space).  Cancelling early saves the master a glimpse on quota
 * revocation.
 *
 * \param env - the environment passed by the caller
 * \param lqe - the lquota entry whose per-ID lock should be cancelled
 *
 * \retval 0       lock cancelled, or nothing to do (entry busy / no lock)
 * \retval -ENOENT propagated from qsd_id_lock_match() when the copied
 *                 handle no longer matches a live lock
 */
int qsd_id_lock_cancel(const struct lu_env *env, struct lquota_entry *lqe)
{
        struct qsd_thread_info *qti = qsd_info(env);
        int rc;
        ENTRY;

        lqe_write_lock(lqe);
        /* entry still in use in any way? then keep the lock */
        if (lqe->lqe_pending_write || lqe->lqe_waiting_write ||
            lqe->lqe_usage || lqe->lqe_granted) {
                lqe_write_unlock(lqe);
                RETURN(0);
        }

        /* snapshot the handle under the write lock, then detach it from the
         * entry so concurrent users see the lock as gone before the actual
         * ldlm cancel below (which must happen outside the lqe lock) */
        lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
        if (lustre_handle_is_used(&qti->qti_lockh)) {
                memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
                /* reset qunit and edquot state along with the lock */
                qsd_set_qunit(lqe, 0);
                lqe->lqe_edquot = false;
        }
        lqe_write_unlock(lqe);

        /* take a reference on the lock (if still alive) ... */
        rc = qsd_id_lock_match(&qti->qti_lockh, NULL);
        if (rc)
                RETURN(rc);

        /* ... and drop it together with a cancel request */
        ldlm_lock_decref_and_cancel(&qti->qti_lockh, qsd_id_einfo.ei_mode);
        RETURN(0);
}
/** * Invariant that has to be true all of the time. */ static int osc_lock_invariant(struct osc_lock *ols) { struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); struct ldlm_lock *olock = ols->ols_lock; int handle_used = lustre_handle_is_used(&ols->ols_handle); return ergo(osc_lock_is_lockless(ols), ols->ols_locklessable && ols->ols_lock == NULL) || (ergo(olock != NULL, handle_used) && ergo(olock != NULL, olock->l_handle.h_cookie == ols->ols_handle.cookie) && /* * Check that ->ols_handle and ->ols_lock are consistent, but * take into account that they are set at the different time. */ ergo(handle_used, ergo(lock != NULL && olock != NULL, lock == olock) && ergo(lock == NULL, olock == NULL)) && ergo(ols->ols_state == OLS_CANCELLED, olock == NULL && !handle_used) && /* * DLM lock is destroyed only after we have seen cancellation * ast. */ ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, !olock->l_destroyed) && ergo(ols->ols_state == OLS_GRANTED, olock != NULL && olock->l_req_mode == olock->l_granted_mode && ols->ols_hold)); }
/**
 * Release the rename lock previously taken via mdt_rename_lock().
 *
 * \param lh - handle of the lock to release; must be a used (valid) handle,
 *             enforced by the LASSERT below.  The lock was taken in LCK_EX
 *             mode — presumably by mdt_rename_lock(); confirm against caller.
 */
static void mdt_rename_unlock(struct lustre_handle *lh)
{
        ENTRY;
        LASSERT(lustre_handle_is_used(lh));
        /* drop the EX reference, matching the mode the lock was taken in */
        ldlm_lock_decref(lh, LCK_EX);
        EXIT;
}
/**
 * Check whether a qsd instance is all set to send quota request to master.
 * This includes checking whether:
 * - the connection to master is set up and usable,
 * - the qsd isn't stopping
 * - reintegration has been successfully completed and all indexes are
 *   up-to-date
 *
 * \param lqe   - is the lquota entry for which we would like to send an quota
 *                request
 * \param lockh - is the remote handle of the global lock returned on success
 *
 * \retval 0            success, \a lockh filled with the remote global lock
 *                      handle
 * \retval -EINPROGRESS qsd stopping or reintegration (re)started; client is
 *                      expected to retry
 * \retval -ENOTCONN    connection to the quota master not usable
 * \retval -ENOLCK      no valid global lock
 */
static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
{
        struct qsd_qtype_info *qqi = lqe2qqi(lqe);
        struct qsd_instance   *qsd = qqi->qqi_qsd;
        struct obd_import     *imp = NULL;
        struct ldlm_lock      *lock;
        ENTRY;

        /* all qsd state below is sampled under qsd_lock */
        read_lock(&qsd->qsd_lock);
        /* is the qsd about to shut down? */
        if (qsd->qsd_stopping) {
                read_unlock(&qsd->qsd_lock);
                LQUOTA_DEBUG(lqe, "dropping quota req since qsd is stopping");
                /* Target is about to shut down, client will retry */
                RETURN(-EINPROGRESS);
        }

        /* is the connection to the quota master ready? */
        if (qsd->qsd_exp_valid)
                imp = class_exp2cliimp(qsd->qsd_exp);
        if (imp == NULL || imp->imp_invalid) {
                read_unlock(&qsd->qsd_lock);
                LQUOTA_DEBUG(lqe, "connection to master not ready");
                RETURN(-ENOTCONN);
        }

        /* In most case, reintegration must have been triggered (when enable
         * quota or on OST start), however, in rare race condition (enabling
         * quota when starting OSTs), we might miss triggering reintegration
         * for some qqi.
         *
         * If the previous reintegration failed for some reason, we'll
         * re-trigger it here as well. */
        if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) {
                read_unlock(&qsd->qsd_lock);
                LQUOTA_DEBUG(lqe, "not up-to-date, dropping request and "
                             "kicking off reintegration");
                qsd_start_reint_thread(qqi);
                RETURN(-EINPROGRESS);
        }

        /* Fill the remote global lock handle, master will check this handle
         * to see if the slave is sending request with stale lock */
        lustre_handle_copy(lockh, &qqi->qqi_lockh);
        read_unlock(&qsd->qsd_lock);

        /* handle was never set: no global lock to speak of */
        if (!lustre_handle_is_used(lockh))
                RETURN(-ENOLCK);

        lock = ldlm_handle2lock(lockh);
        if (lock == NULL)
                RETURN(-ENOLCK);

        /* return remote lock handle to be packed in quota request */
        lustre_handle_copy(lockh, &lock->l_remote_handle);
        LDLM_LOCK_PUT(lock);

        RETURN(0);
}
/** * Invariant that has to be true all of the time. */ static int osc_lock_invariant(struct osc_lock *ols) { struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); struct ldlm_lock *olock = ols->ols_lock; int handle_used = lustre_handle_is_used(&ols->ols_handle); if (ergo(osc_lock_is_lockless(ols), ols->ols_locklessable && ols->ols_lock == NULL)) return 1; /* * If all the following "ergo"s are true, return 1, otherwise 0 */ if (!ergo(olock != NULL, handle_used)) return 0; if (!ergo(olock != NULL, olock->l_handle.h_cookie == ols->ols_handle.cookie)) return 0; if (!ergo(handle_used, ergo(lock != NULL && olock != NULL, lock == olock) && ergo(lock == NULL, olock == NULL))) return 0; /* * Check that ->ols_handle and ->ols_lock are consistent, but * take into account that they are set at the different time. */ if (!ergo(ols->ols_state == OLS_CANCELLED, olock == NULL && !handle_used)) return 0; /* * DLM lock is destroyed only after we have seen cancellation * ast. */ if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) return 0; if (!ergo(ols->ols_state == OLS_GRANTED, olock != NULL && olock->l_req_mode == olock->l_granted_mode && ols->ols_hold)) return 0; return 1; }
/*
 * Get intent per-ID lock or global-index lock from master.
 *
 * \param env        - the environment passed by the caller
 * \param exp        - is the export to use to send the intent RPC
 * \param qbody      - quota body to be packed in request
 * \param sync       - synchronous or asynchronous (pre-acquire)
 * \param it_op      - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi        - is the qsd_qtype_info structure to pass to the
 *                     completion function
 * \param lvb        - is the lvb associated with the lock and returned by the
 *                     server
 * \param arg        - is an opaq argument passed to the completion callback
 *
 * \retval 0   - success (for async, only means the request was queued;
 *               the completion callback reports the final result)
 * \retval -ve - appropriate errors; the completion callback is invoked on
 *               every failure path so the caller's resources (e.g. lvb,
 *               references held via arg) are always released exactly once
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
                    struct quota_body *qbody, bool sync, int it_op,
                    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
                    struct lquota_lvb *lvb, void *arg)
{
        struct qsd_thread_info *qti = qsd_info(env);
        struct ptlrpc_request  *req;
        struct qsd_async_args  *aa = NULL;
        struct ldlm_intent     *lit;
        struct quota_body      *req_qbody;
        __u64                   flags = LDLM_FL_HAS_INTENT;
        int                     rc;
        ENTRY;

        LASSERT(exp != NULL);
        LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

        memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_LDLM_INTENT_QUOTA);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        /* -EINPROGRESS replies are handled by retrying the whole operation,
         * not by resending this request */
        req->rq_no_retry_einprogress = 1;
        rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        /* pack the intent opcode and the quota body in the request */
        lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        lit->opc = (__u64)it_op;

        req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        *req_qbody = *qbody;

        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             sizeof(*lvb));
        ptlrpc_request_set_replen(req);

        switch(it_op) {
        case IT_QUOTA_CONN:
                /* build resource name associated with global index */
                fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

                /* copy einfo template and fill ei_cbdata with qqi pointer */
                memcpy(&qti->qti_einfo, &qsd_glb_einfo,
                       sizeof(qti->qti_einfo));
                qti->qti_einfo.ei_cbdata = qqi;

                /* don't cancel global lock on memory pressure */
                flags |= LDLM_FL_NO_LRU;
                break;
        case IT_QUOTA_DQACQ:
                /* build resource name associated for per-ID quota lock */
                fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
                                         &qti->qti_resid);

                /* copy einfo template and fill ei_cbdata with lqe pointer */
                memcpy(&qti->qti_einfo, &qsd_id_einfo,
                       sizeof(qti->qti_einfo));
                qti->qti_einfo.ei_cbdata = arg;
                break;
        default:
                LASSERTF(0, "invalid it_op %d", it_op);
        }

        /* build lock enqueue request */
        rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid,
                              NULL, &flags, (void *)lvb, sizeof(*lvb),
                              LVB_T_LQUOTA, &qti->qti_lockh, 1);
        if (rc < 0) {
                ptlrpc_req_finished(req);
                GOTO(out, rc);
        }

        /* grab reference on backend structure for the new lock; the matching
         * release is done by the lock's blocking/cancel callback — presumably
         * set up via the einfo templates above; confirm in qsd_*_einfo */
        switch(it_op) {
        case IT_QUOTA_CONN:
                /* grab reference on qqi for new lock */
#ifdef USE_LU_REF
        {
                struct ldlm_lock *lock;

                lock = ldlm_handle2lock(&qti->qti_lockh);
                if (lock == NULL) {
                        ptlrpc_req_finished(req);
                        GOTO(out, rc = -ENOLCK);
                }
                lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
                LDLM_LOCK_PUT(lock);
        }
#endif
                qqi_getref(qqi);
                break;
        case IT_QUOTA_DQACQ:
                /* grab reference on lqe for new lock */
                lqe_getref((struct lquota_entry *)arg);
                /* all acquire/release request are sent with no_resend and
                 * no_delay flag */
                req->rq_no_resend = req->rq_no_delay = 1;
                break;
        default:
                break;
        }

        /* stash completion context in the request's async args; used by
         * qsd_intent_interpret() in both sync and async paths */
        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_exp = exp;
        aa->aa_qqi = qqi;
        aa->aa_arg = arg;
        aa->aa_lvb = lvb;
        aa->aa_completion = completion;
        lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

        if (sync) {
                /* send lock enqueue request and wait for completion */
                rc = ptlrpc_queue_wait(req);
                rc = qsd_intent_interpret(env, req, aa, rc);
                ptlrpc_req_finished(req);
        } else {
                /* queue lock request and return */
                req->rq_interpret_reply = qsd_intent_interpret;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
        }

        RETURN(rc);
out:
        /* error before the request was handed to the interpreter: invoke the
         * completion callback ourselves so cleanup happens exactly once */
        completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
        return rc;
}
/*
 * Handle quota request from slave.
 *
 * \param env - is the environment passed by the caller
 * \param ld  - is the lu device associated with the qmt
 * \param req - is the quota acquire request
 *
 * \retval 0       request processed, reply body filled
 * \retval -EPROTO malformed request (missing body or conflicting flags)
 * \retval -ENOLCK stale/missing global or per-ID lock handle
 * \retval -ve     other errors from lookup or qmt_dqacq0()
 */
static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
                     struct ptlrpc_request *req)
{
        struct qmt_device   *qmt = lu2qmt_dev(ld);
        struct quota_body   *qbody, *repbody;
        struct obd_uuid     *uuid;
        struct ldlm_lock    *lock;
        struct lquota_entry *lqe;
        int                  pool_id, pool_type, qtype;
        int                  rc;
        ENTRY;

        qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (qbody == NULL)
                RETURN(err_serious(-EPROTO));

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /* verify if global lock is stale */
        if (!lustre_handle_is_used(&qbody->qb_glb_lockh))
                RETURN(-ENOLCK);

        /* only checking the handle resolves to a live lock; the reference is
         * dropped right away */
        lock = ldlm_handle2lock(&qbody->qb_glb_lockh);
        if (lock == NULL)
                RETURN(-ENOLCK);
        LDLM_LOCK_PUT(lock);

        uuid = &req->rq_export->exp_client_uuid;

        /* rel/acq/preacq are mutually exclusive; at most one may be set */
        if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
            req_is_preacq(qbody->qb_flags) > 1) {
                CERROR("%s: malformed quota request with conflicting flags set "
                       "(%x) from slave %s\n", qmt->qmt_svname,
                       qbody->qb_flags, obd_uuid2str(uuid));
                RETURN(-EPROTO);
        }

        if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
                /* acquire and pre-acquire should use a valid ID lock */

                if (!lustre_handle_is_used(&qbody->qb_lockh))
                        RETURN(-ENOLCK);

                lock = ldlm_handle2lock(&qbody->qb_lockh);
                if (lock == NULL)
                        /* no lock associated with this handle */
                        RETURN(-ENOLCK);

                LDLM_DEBUG(lock, "%sacquire request",
                           req_is_preacq(qbody->qb_flags) ? "pre" : "");

                /* the per-ID lock must belong to the requesting slave */
                if (!obd_uuid_equals(&lock->l_export->exp_client_uuid,
                                     uuid)) {
                        /* sorry, no way to cheat ... */
                        LDLM_LOCK_PUT(lock);
                        RETURN(-ENOLCK);
                }

                if ((lock->l_flags & LDLM_FL_AST_SENT) != 0) {
                        struct ptlrpc_service_part *svc;
                        unsigned int                timeout;

                        svc = req->rq_rqbd->rqbd_svcpt;
                        timeout = at_est2timeout(at_get(
                                                &svc->scp_at_estimate));
                        timeout = max(timeout, ldlm_timeout);

                        /* lock is being cancelled, prolong timeout */
                        ldlm_refresh_waiting_lock(lock, timeout);
                }
                LDLM_LOCK_PUT(lock);
        }

        /* extract pool & quota information from global index FID packed in the
         * request */
        rc = lquota_extract_fid(&qbody->qb_fid, &pool_id, &pool_type, &qtype);
        if (rc)
                RETURN(-EINVAL);

        /* Find the quota entry associated with the quota id */
        lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, pool_type, qtype,
                                  &qbody->qb_id);
        if (IS_ERR(lqe))
                RETURN(PTR_ERR(lqe));

        /* process quota request */
        rc = qmt_dqacq0(env, lqe, qmt, uuid, qbody->qb_flags, qbody->qb_count,
                        qbody->qb_usage, repbody);

        if (lustre_handle_is_used(&qbody->qb_lockh))
                /* return current qunit value only to slaves owning an per-ID
                 * quota lock. For enqueue, the qunit value will be returned in
                 * the LVB */
                repbody->qb_qunit = lqe->lqe_qunit;
        lqe_putref(lqe);
        RETURN(rc);
}
/**
 * Callback function called when an acquire/release request sent to the master
 * is completed.  Updates the lquota entry (granted space, qunit, edquot
 * state), schedules the slave index update and any follow-up space
 * adjustment, and releases all per-request resources (lock reference, lqe
 * reference, lvb buffer).
 *
 * \param env     - the environment; refilled with DT tags since ptlrpcd's
 *                  env lacks them
 * \param qqi     - the qsd_qtype_info this request belongs to
 * \param reqbody - the quota body that was sent
 * \param repbody - the quota body returned by the master, may be NULL
 * \param lockh   - handle of the per-ID lock referenced for this request
 * \param lvb     - lvb returned by the server, freed here; may be NULL
 * \param arg     - opaque pointer, actually the lquota_entry
 * \param ret     - status of the request
 */
static void qsd_req_completion(const struct lu_env *env,
                               struct qsd_qtype_info *qqi,
                               struct quota_body *reqbody,
                               struct quota_body *repbody,
                               struct lustre_handle *lockh,
                               struct lquota_lvb *lvb,
                               void *arg, int ret)
{
        struct lquota_entry    *lqe = (struct lquota_entry *)arg;
        struct qsd_thread_info *qti;
        int                     rc;
        bool                    adjust = false, cancel = false;
        ENTRY;

        LASSERT(qqi != NULL && lqe != NULL);

        /* environment passed by ptlrpcd is mostly used by CLIO and hasn't the
         * DT tags set. */
        rc = lu_env_refill_by_tags((struct lu_env *)env, LCT_DT_THREAD, 0);
        if (rc) {
                LQUOTA_ERROR(lqe, "failed to refill environmnent %d", rc);
                lqe_write_lock(lqe);
                /* can't afford to adjust quota space with no suitable lu_env */
                GOTO(out_noadjust, rc);
        }
        qti = qsd_info(env);

        lqe_write_lock(lqe);
        LQUOTA_DEBUG(lqe, "DQACQ returned %d, flags:0x%x", ret,
                     reqbody->qb_flags);

        /* despite -EDQUOT & -EINPROGRESS errors, the master might still
         * grant us back quota space to adjust quota overrun */
        if (ret != 0 && ret != -EDQUOT && ret != -EINPROGRESS) {
                if (ret != -ETIMEDOUT && ret != -ENOTCONN &&
                    ret != -ESHUTDOWN && ret != -EAGAIN)
                        /* print errors only if return code is unexpected */
                        LQUOTA_ERROR(lqe, "DQACQ failed with %d, flags:0x%x",
                                     ret, reqbody->qb_flags);
                GOTO(out, ret);
        }

        /* Set the lqe_lockh */
        if (lustre_handle_is_used(lockh) &&
            !lustre_handle_equal(lockh, &lqe->lqe_lockh))
                lustre_handle_copy(&lqe->lqe_lockh, lockh);

        /* If the replied qb_count is zero, it means master didn't process
         * the DQACQ since the limit for this ID has been removed, so we
         * should not update quota entry & slave index copy neither. */
        if (repbody != NULL && repbody->qb_count != 0) {
                LQUOTA_DEBUG(lqe, "DQACQ qb_count:"LPU64, repbody->qb_count);

                if (req_is_rel(reqbody->qb_flags)) {
                        /* release: granted space shrinks; clamp at 0 if the
                         * master claims more than we think we own */
                        if (lqe->lqe_granted < repbody->qb_count) {
                                LQUOTA_ERROR(lqe, "can't release more space "
                                             "than owned "LPU64"<"LPU64,
                                             lqe->lqe_granted,
                                             repbody->qb_count);
                                lqe->lqe_granted = 0;
                        } else {
                                lqe->lqe_granted -= repbody->qb_count;
                        }
                        /* Cancel the per-ID lock initiatively when there
                         * isn't any usage & grant, which can avoid master
                         * sending glimpse unnecessarily to this slave on
                         * quota revoking */
                        if (!lqe->lqe_pending_write && !lqe->lqe_granted &&
                            !lqe->lqe_waiting_write && !lqe->lqe_usage)
                                cancel = true;
                } else {
                        lqe->lqe_granted += repbody->qb_count;
                }
                qti->qti_rec.lqr_slv_rec.qsr_granted = lqe->lqe_granted;
                /* drop the entry lock across the index-update scheduling,
                 * then retake it for the remaining state updates below */
                lqe_write_unlock(lqe);

                /* Update the slave index file in the dedicated thread. So far,
                 * We don't update the version of slave index copy on DQACQ.
                 * No locking is necessary since nobody can change
                 * lqe->lqe_granted while lqe->lqe_pending_req > 0 */
                qsd_upd_schedule(qqi, lqe, &lqe->lqe_id, &qti->qti_rec, 0,
                                 false);
                lqe_write_lock(lqe);
        }

        /* extract information from lvb */
        /* style nit: "lvb != 0" compares a pointer against 0; should read
         * "lvb != NULL" (kept byte-identical here) */
        if (ret == 0 && lvb != 0) {
                if (lvb->lvb_id_qunit != 0)
                        qsd_set_qunit(lqe, lvb->lvb_id_qunit);
                qsd_set_edquot(lqe, !!(lvb->lvb_flags & LQUOTA_FL_EDQUOT));
        } else if (repbody != NULL && repbody->qb_qunit != 0) {
                qsd_set_qunit(lqe, repbody->qb_qunit);
        }

        /* turn off pre-acquire if it failed with -EDQUOT. This is done to avoid
         * flooding the master with acquire request. Pre-acquire will be turned
         * on again as soon as qunit is modified */
        if (req_is_preacq(reqbody->qb_flags) && ret == -EDQUOT)
                lqe->lqe_nopreacq = true;
out:
        adjust = qsd_adjust_needed(lqe);
        if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
                /* remember the outcome of the last acquire */
                lqe->lqe_acq_rc = ret;
                lqe->lqe_acq_time = cfs_time_current_64();
        }
out_noadjust:
        qsd_request_exit(lqe);
        lqe_write_unlock(lqe);

        /* release reference on per-ID lock */
        if (lustre_handle_is_used(lockh))
                ldlm_lock_decref(lockh, qsd_id_einfo.ei_mode);

        if (cancel) {
                qsd_adjust_schedule(lqe, false, true);
        } else if (adjust) {
                if (!ret || ret == -EDQUOT)
                        qsd_adjust_schedule(lqe, false, false);
                else
                        qsd_adjust_schedule(lqe, true, false);
        }
        /* drop the reference taken when the request was issued */
        lqe_putref(lqe);

        if (lvb)
                OBD_FREE_PTR(lvb);
        EXIT;
}
/**
 * Check whether any quota space adjustment (pre-acquire/release/report) is
 * needed for a given quota ID. If a non-null \a qbody is passed, then the
 * \a qbody structure (qb_count/flags/usage) is filled with appropriate data
 * to be packed in the quota request.
 *
 * The caller is expected to hold the lqe lock — all lqe fields are read
 * without further locking here; TODO confirm against callers.
 *
 * \param lqe   - is the lquota entry for which we would like to adjust quota
 *                space.
 * \param qbody - is the quota body to fill, if not NULL.
 *
 * \retval true  - space adjustment is required and \a qbody is filled, if not
 *                 NULL
 * \retval false - no space adjustment required
 */
static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
{
        __u64 usage, granted;
        ENTRY;

        /* projected usage includes in-flight and queued writes */
        usage = lqe->lqe_usage;
        usage += lqe->lqe_pending_write + lqe->lqe_waiting_write;
        granted = lqe->lqe_granted;

        if (qbody != NULL)
                qbody->qb_flags = 0;

        if (!lqe->lqe_enforced) {
                /* quota not enforced any more for this ID */
                if (granted != 0) {
                        /* release all quota space unconditionally */
                        LQUOTA_DEBUG(lqe, "not enforced, releasing all space");
                        if (qbody != NULL) {
                                qbody->qb_count = granted;
                                qbody->qb_flags = QUOTA_DQACQ_FL_REL;
                        }
                        RETURN(true);
                }
                RETURN(false);
        }

        if (!lustre_handle_is_used(&lqe->lqe_lockh)) {
                /* No valid per-ID lock
                 * When reporting quota (during reintegration or on setquota
                 * glimpse), we should release granted space if usage is 0.
                 * Otherwise, if the usage is less than granted, we need to
                 * acquire the per-ID lock to make sure the unused grant can be
                 * reclaimed by per-ID lock glimpse. */
                if (usage == 0) {
                        /* no on-disk usage and no outstanding activity, release
                         * space */
                        if (granted != 0) {
                                LQUOTA_DEBUG(lqe, "no usage, releasing all "
                                             "space");
                                if (qbody != NULL) {
                                        qbody->qb_count = granted;
                                        qbody->qb_flags = QUOTA_DQACQ_FL_REL;
                                }
                                RETURN(true);
                        }
                        LQUOTA_DEBUG(lqe, "no usage + no granted, nothing to "
                                     "do");
                        RETURN(false);
                }

                if (lqe->lqe_usage < lqe->lqe_granted) {
                        /* holding quota space w/o any lock, enqueue per-ID lock
                         * again */
                        LQUOTA_DEBUG(lqe, "(re)acquiring per-ID lock");
                        if (qbody != NULL) {
                                qbody->qb_count = 0;
                                qbody->qb_flags = QUOTA_DQACQ_FL_ACQ;
                        }
                        RETURN(true);
                }

                if (lqe->lqe_usage > lqe->lqe_granted) {
                        /* quota overrun, report usage */
                        LQUOTA_DEBUG(lqe, "overrun, reporting usage");
                        if (qbody != NULL) {
                                qbody->qb_usage = lqe->lqe_usage;
                                qbody->qb_flags = QUOTA_DQACQ_FL_REPORT;
                        }
                        RETURN(true);
                }
                LQUOTA_DEBUG(lqe, "granted matches usage, nothing to do");
                RETURN(false);
        }

        /* valid per-ID lock
         * Apply good old quota qunit adjustment logic which has been around
         * since lustre 1.4:
         * 1. release spare quota space? */
        if (granted > usage + lqe->lqe_qunit) {
                /* pre-release quota space */
                if (qbody == NULL)
                        RETURN(true);
                qbody->qb_count = granted - usage;
                /* if usage == 0, release all granted space */
                if (usage) {
                        /* try to keep one qunit of quota space */
                        qbody->qb_count -= lqe->lqe_qunit;
                        /* but don't release less than qtune to avoid releasing
                         * space too often */
                        if (qbody->qb_count < lqe->lqe_qtune)
                                qbody->qb_count = lqe->lqe_qtune;
                }
                qbody->qb_flags = QUOTA_DQACQ_FL_REL;
                RETURN(true);
        }

        /* 2. Any quota overrun? */
        if (lqe->lqe_usage > lqe->lqe_granted) {
                /* we overconsumed quota space, we report usage in request so
                 * that master can adjust it unconditionally */
                if (qbody == NULL)
                        RETURN(true);
                qbody->qb_usage = lqe->lqe_usage;
                /* pretend granted == usage so the pre-acquire check below
                 * works from the reported state */
                granted = lqe->lqe_usage;
                qbody->qb_flags = QUOTA_DQACQ_FL_REPORT;
        }

        /* 3. Time to pre-acquire? */
        if (!lqe->lqe_edquot && !lqe->lqe_nopreacq && usage > 0 &&
            lqe->lqe_qunit != 0 && granted < usage + lqe->lqe_qtune) {
                /* To pre-acquire quota space, we report how much spare quota
                 * space the slave currently owns, then the master will grant us
                 * back how much we can pretend given the current state of
                 * affairs */
                if (qbody == NULL)
                        RETURN(true);
                if (granted <= usage)
                        qbody->qb_count = 0;
                else
                        qbody->qb_count = granted - usage;
                qbody->qb_flags |= QUOTA_DQACQ_FL_PREACQ;
                RETURN(true);
        }

        /* a REPORT flag may have been set in step 2 even if step 3 didn't
         * trigger, hence the flag check rather than plain false */
        if (qbody != NULL)
                RETURN(qbody->qb_flags != 0);
        else
                RETURN(false);
}
/*
 * VBR: rename versions in reply: 0 - src parent; 1 - tgt parent;
 * 2 - src child; 3 - tgt child.
 * Update on disk version of src child.
 *
 * Perform a rename on behalf of a client:
 *   step 1: take the global rename lock, lock the source dir,
 *   step 2: find & lock the target dir (same dir handled specially),
 *   step 3: find & lock the old (source child) object,
 *   step 4: find & lock the new (target child) object, if it exists,
 *   step 5: call into the metadata layer to do the rename.
 * All failure paths unwind through the goto-cleanup chain at the bottom,
 * releasing locks/references in reverse order of acquisition.
 *
 * \param info - the request thread context
 * \param lhc  - lock handle passed by the reint dispatcher (unused here —
 *               TODO confirm whether intentionally ignored)
 *
 * \retval 0 on success, negative errno otherwise (e.g. -EXDEV for
 *         cross-MDT objects, -EPERM for the invisible .lustre/fid dir,
 *         -EINVAL for degenerate fid combinations)
 */
static int mdt_reint_rename(struct mdt_thread_info *info,
                            struct mdt_lock_handle *lhc)
{
        struct mdt_reint_record *rr = &info->mti_rr;
        struct md_attr          *ma = &info->mti_attr;
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct mdt_object       *msrcdir;
        struct mdt_object       *mtgtdir;
        struct mdt_object       *mold;
        struct mdt_object       *mnew = NULL;
        struct mdt_lock_handle  *lh_srcdirp;
        struct mdt_lock_handle  *lh_tgtdirp;
        struct mdt_lock_handle  *lh_oldp;
        struct mdt_lock_handle  *lh_newp;
        struct lu_fid           *old_fid = &info->mti_tmp_fid1;
        struct lu_fid           *new_fid = &info->mti_tmp_fid2;
        struct lustre_handle     rename_lh = { 0 };
        struct lu_name           slname = { 0 };
        struct lu_name          *lname;
        int                      rc;
        ENTRY;

        /* cancel the locks the client asked us to drop along with the reint */
        if (info->mti_dlm_req)
                ldlm_request_cancel(req, info->mti_dlm_req, 0);

        DEBUG_REQ(D_INODE, req, "rename "DFID"/%s to "DFID"/%s",
                  PFID(rr->rr_fid1), rr->rr_name,
                  PFID(rr->rr_fid2), rr->rr_tgt);

        /* serialize renames filesystem-wide */
        rc = mdt_rename_lock(info, &rename_lh);
        if (rc) {
                CERROR("Can't lock FS for rename, rc %d\n", rc);
                RETURN(rc);
        }

        lh_newp = &info->mti_lh[MDT_LH_NEW];

        /* step 1: lock the source dir. */
        lh_srcdirp = &info->mti_lh[MDT_LH_PARENT];
        mdt_lock_pdo_init(lh_srcdirp, LCK_PW, rr->rr_name, rr->rr_namelen);
        msrcdir = mdt_object_find_lock(info, rr->rr_fid1, lh_srcdirp,
                                       MDS_INODELOCK_UPDATE);
        if (IS_ERR(msrcdir))
                GOTO(out_rename_lock, rc = PTR_ERR(msrcdir));

        if (mdt_object_obf(msrcdir))
                GOTO(out_unlock_source, rc = -EPERM);

        rc = mdt_version_get_check_save(info, msrcdir, 0);
        if (rc)
                GOTO(out_unlock_source, rc);

        /* step 2: find & lock the target dir. */
        lh_tgtdirp = &info->mti_lh[MDT_LH_CHILD];
        mdt_lock_pdo_init(lh_tgtdirp, LCK_PW, rr->rr_tgt, rr->rr_tgtlen);
        if (lu_fid_eq(rr->rr_fid1, rr->rr_fid2)) {
                /* source and target dir are the same object; only take an
                 * extra pdir-hash lock when the names hash differently */
                mdt_object_get(info->mti_env, msrcdir);
                mtgtdir = msrcdir;
                if (lh_tgtdirp->mlh_pdo_hash != lh_srcdirp->mlh_pdo_hash) {
                        rc = mdt_pdir_hash_lock(info, lh_tgtdirp, mtgtdir,
                                                MDS_INODELOCK_UPDATE);
                        if (rc)
                                GOTO(out_unlock_source, rc);
                        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK2, 10);
                }
        } else {
                mtgtdir = mdt_object_find(info->mti_env, info->mti_mdt,
                                          rr->rr_fid2);
                if (IS_ERR(mtgtdir))
                        GOTO(out_unlock_source, rc = PTR_ERR(mtgtdir));

                if (mdt_object_obf(mtgtdir))
                        GOTO(out_put_target, rc = -EPERM);

                /* check early, the real version will be saved after locking */
                rc = mdt_version_get_check(info, mtgtdir, 1);
                if (rc)
                        GOTO(out_put_target, rc);

                if (unlikely(mdt_object_remote(mtgtdir))) {
                        CDEBUG(D_INFO, "Source dir "DFID" target dir "DFID
                               "on different MDTs\n", PFID(rr->rr_fid1),
                               PFID(rr->rr_fid2));
                        GOTO(out_put_target, rc = -EXDEV);
                } else {
                        if (likely(mdt_object_exists(mtgtdir))) {
                                /* we lock the target dir if it is local */
                                rc = mdt_object_lock(info, mtgtdir,
                                                     lh_tgtdirp,
                                                     MDS_INODELOCK_UPDATE,
                                                     MDT_LOCAL_LOCK);
                                if (rc != 0)
                                        GOTO(out_put_target, rc);
                                /* get and save correct version after
                                 * locking */
                                mdt_version_get_save(info, mtgtdir, 1);
                        } else {
                                GOTO(out_put_target, rc = -ESTALE);
                        }
                }
        }

        /* step 3: find & lock the old object. */
        lname = mdt_name(info->mti_env, (char *)rr->rr_name, rr->rr_namelen);
        mdt_name_copy(&slname, lname);
        fid_zero(old_fid);
        rc = mdt_lookup_version_check(info, msrcdir, &slname, old_fid, 2);
        if (rc != 0)
                GOTO(out_unlock_target, rc);

        /* renaming a directory over/onto one of its own parents is bogus */
        if (lu_fid_eq(old_fid, rr->rr_fid1) ||
            lu_fid_eq(old_fid, rr->rr_fid2))
                GOTO(out_unlock_target, rc = -EINVAL);

        mold = mdt_object_find(info->mti_env, info->mti_mdt, old_fid);
        if (IS_ERR(mold))
                GOTO(out_unlock_target, rc = PTR_ERR(mold));
        if (mdt_object_remote(mold)) {
                mdt_object_put(info->mti_env, mold);
                CDEBUG(D_INFO, "Source child "DFID" is on another MDT\n",
                       PFID(old_fid));
                GOTO(out_unlock_target, rc = -EXDEV);
        }

        if (mdt_object_obf(mold)) {
                mdt_object_put(info->mti_env, mold);
                GOTO(out_unlock_target, rc = -EPERM);
        }

        lh_oldp = &info->mti_lh[MDT_LH_OLD];
        mdt_lock_reg_init(lh_oldp, LCK_EX);
        rc = mdt_object_lock(info, mold, lh_oldp, MDS_INODELOCK_LOOKUP,
                             MDT_CROSS_LOCK);
        if (rc != 0) {
                mdt_object_put(info->mti_env, mold);
                GOTO(out_unlock_target, rc);
        }

        info->mti_mos = mold;
        /* save version after locking */
        mdt_version_get_save(info, mold, 2);
        mdt_set_capainfo(info, 2, old_fid, BYPASS_CAPA);

        /* step 4: find & lock the new object. */
        /* new target object may not exist now */
        lname = mdt_name(info->mti_env, (char *)rr->rr_tgt, rr->rr_tgtlen);
        /* lookup with version checking */
        fid_zero(new_fid);
        rc = mdt_lookup_version_check(info, mtgtdir, lname, new_fid, 3);
        if (rc == 0) {
                /* the new_fid should have been filled at this moment */
                if (lu_fid_eq(old_fid, new_fid))
                        /* rename onto itself: nothing to do, rc == 0 */
                        GOTO(out_unlock_old, rc);

                if (lu_fid_eq(new_fid, rr->rr_fid1) ||
                    lu_fid_eq(new_fid, rr->rr_fid2))
                        GOTO(out_unlock_old, rc = -EINVAL);

                mdt_lock_reg_init(lh_newp, LCK_EX);
                mnew = mdt_object_find(info->mti_env, info->mti_mdt,
                                       new_fid);
                if (IS_ERR(mnew))
                        GOTO(out_unlock_old, rc = PTR_ERR(mnew));

                if (mdt_object_obf(mnew)) {
                        mdt_object_put(info->mti_env, mnew);
                        GOTO(out_unlock_old, rc = -EPERM);
                }

                if (mdt_object_remote(mnew)) {
                        mdt_object_put(info->mti_env, mnew);
                        CDEBUG(D_INFO, "src child "DFID" is on another MDT\n",
                               PFID(new_fid));
                        GOTO(out_unlock_old, rc = -EXDEV);
                }

                rc = mdt_object_lock(info, mnew, lh_newp,
                                     MDS_INODELOCK_FULL, MDT_CROSS_LOCK);
                if (rc != 0) {
                        mdt_object_put(info->mti_env, mnew);
                        GOTO(out_unlock_old, rc);
                }
                /* get and save version after locking */
                mdt_version_get_save(info, mnew, 3);
                mdt_set_capainfo(info, 3, new_fid, BYPASS_CAPA);
        } else if (rc != -EREMOTE && rc != -ENOENT) {
                GOTO(out_unlock_old, rc);
        } else {
                /* target name doesn't exist (or is remote): record the
                 * ENOENT version for VBR */
                mdt_enoent_version_save(info, 3);
        }

        /* step 5: rename it */
        mdt_reint_init_ma(info, ma);

        mdt_fail_write(info->mti_env, info->mti_mdt->mdt_bottom,
                       OBD_FAIL_MDS_REINT_RENAME_WRITE);

        /* Check if @dst is subdir of @src. */
        rc = mdt_rename_sanity(info, old_fid);
        if (rc)
                GOTO(out_unlock_new, rc);

        rc = mdo_rename(info->mti_env, mdt_object_child(msrcdir),
                        mdt_object_child(mtgtdir), old_fid, &slname,
                        (mnew ? mdt_object_child(mnew) : NULL), lname, ma);

        /* handle last link of tgt object */
        if (rc == 0) {
                mdt_counter_incr(req, LPROC_MDT_RENAME);
                if (mnew)
                        mdt_handle_last_unlink(info, mnew, ma);

                mdt_rename_counter_tally(info, info->mti_mdt, req,
                                         msrcdir, mtgtdir);
        }

        EXIT;
out_unlock_new:
        if (mnew)
                mdt_object_unlock_put(info, mnew, lh_newp, rc);
out_unlock_old:
        mdt_object_unlock_put(info, mold, lh_oldp, rc);
out_unlock_target:
        mdt_object_unlock(info, mtgtdir, lh_tgtdirp, rc);
out_put_target:
        mdt_object_put(info->mti_env, mtgtdir);
out_unlock_source:
        mdt_object_unlock_put(info, msrcdir, lh_srcdirp, rc);
out_rename_lock:
        if (lustre_handle_is_used(&rename_lh))
                mdt_rename_unlock(&rename_lh);
        return rc;
}