int quota_is_set(struct obd_device *obd, const unsigned int id[], int flag) { struct lustre_qunit_size *lqs; int i, q_set = 0; if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb)) RETURN(0); for (i = 0; i < MAXQUOTAS; i++) { lqs = quota_search_lqs(LQS_KEY(i, id[i]), &obd->u.obt.obt_qctxt, 0); if (lqs && !IS_ERR(lqs)) { if (lqs->lqs_flags & flag) q_set = 1; lqs_putref(lqs); } } return q_set; }
/**
 * Quotactl entry point on an OST/filter device.
 *
 * Dispatches the quotactl command in \a oqctl: enabling/disabling quota,
 * querying limits/usage, nullifying limits (Q_SETQUOTA) and initializing
 * the minimal limit on a slave (Q_INITQUOTA).  Elapsed time is recorded
 * in the LQUOTA_QUOTA_CTL lprocfs counter.
 *
 * \param unused  ignored (kept for the obd method signature)
 * \param exp     export whose obd device is operated on
 * \param oqctl   quotactl request/reply buffer; qc_id/qc_cmd may be
 *                rewritten in place while processing
 *
 * \retval 0 on success, negative errno on failure (-EBUSY if
 *         Q_FINVALIDATE races with quota still on, -EALREADY if
 *         Q_QUOTAOFF finds quota already off, -EFAULT for an
 *         unsupported command)
 */
int filter_quota_ctl(struct obd_device *unused, struct obd_export *exp,
                     struct obd_quotactl *oqctl)
{
        struct obd_device *obd = exp->exp_obd;
        struct obd_device_target *obt = &obd->u.obt;
        struct lvfs_run_ctxt saved;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        struct lustre_qunit_size *lqs;
        void *handle = NULL;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int rc = 0;
        ENTRY;

        cfs_gettimeofday(&work_start);
        switch (oqctl->qc_cmd) {
        case Q_QUOTAON:
                /* pass the on-disk quota format version down in qc_id */
                oqctl->qc_id = obt->obt_qfmt;
                rc = generic_quota_on(obd, oqctl, 0);
                break;
        case Q_FINVALIDATE:
        case Q_QUOTAOFF:
                /* serialize against quotacheck; released below after the
                 * fsfilt call (shared tail with the Q_GET* cases) */
                cfs_down(&obt->obt_quotachecking);
                if (oqctl->qc_cmd == Q_FINVALIDATE &&
                    (obt->obt_qctxt.lqc_flags & UGQUOTA2LQC(oqctl->qc_type))) {
                        /* cannot invalidate quota files while quota is on */
                        CWARN("quota[%u] is on yet\n", oqctl->qc_type);
                        cfs_up(&obt->obt_quotachecking);
                        rc = -EBUSY;
                        break;
                }
                oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
                /* fallthrough: perform the actual fsfilt_quotactl below */
        case Q_GETOINFO:
        case Q_GETOQUOTA:
        case Q_GETQUOTA:
                /* In recovery scenario, this pending dqacq/dqrel might have
                 * been processed by master successfully before it's dquot
                 * on master enter recovery mode. We must wait for this
                 * dqacq/dqrel done then return the correct limits to master */
                if (oqctl->qc_stat == QUOTA_RECOVERING)
                        handle = quota_barrier(&obd->u.obt.obt_qctxt, oqctl, 1);

                /* run fsfilt_quotactl with the server's fs context */
                push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                rc = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);

                if (oqctl->qc_stat == QUOTA_RECOVERING)
                        quota_unbarrier(handle);

                if (oqctl->qc_cmd == Q_QUOTAOFF ||
                    oqctl->qc_cmd == Q_FINVALIDATE) {
                        if (oqctl->qc_cmd == Q_QUOTAOFF) {
                                if (!rc)
                                        obt->obt_qctxt.lqc_flags &=
                                                ~UGQUOTA2LQC(oqctl->qc_type);
                                else if (quota_is_off(qctxt, oqctl))
                                        /* already off: report -EALREADY
                                         * instead of the fsfilt error */
                                        rc = -EALREADY;
                                CDEBUG(D_QUOTA, "%s: quotaoff type:flags:rc "
                                       "%u:%lu:%d\n", obd->obd_name,
                                       oqctl->qc_type, qctxt->lqc_flags, rc);
                        }
                        /* release the semaphore taken in the
                         * Q_FINVALIDATE/Q_QUOTAOFF entry above */
                        cfs_up(&obt->obt_quotachecking);
                }
                break;
        case Q_SETQUOTA:
                /* currently, it is only used for nullifying the quota */
                handle = quota_barrier(&obd->u.obt.obt_qctxt, oqctl, 1);

                push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                rc = fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);

                if (!rc) {
                        /* flush the new limits to disk immediately; restore
                         * qc_cmd so the caller sees its original request */
                        oqctl->qc_cmd = Q_SYNC;
                        fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);
                        oqctl->qc_cmd = Q_SETQUOTA;
                }
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                quota_unbarrier(handle);

                lqs = quota_search_lqs(LQS_KEY(oqctl->qc_type, oqctl->qc_id),
                                       qctxt, 0);
                if (lqs == NULL || IS_ERR(lqs)){
                        CERROR("fail to create lqs during setquota operation "
                               "for %sid %u\n", oqctl->qc_type ? "g" : "u",
                               oqctl->qc_id);
                } else {
                        /* limits were nullified: clear the "limit set" bit */
                        lqs->lqs_flags &= ~QB_SET;
                        lqs_putref(lqs);
                }
                break;
        case Q_INITQUOTA:
                {
                unsigned int id[MAXQUOTAS] = { 0, 0 };

                /* Initialize quota limit to MIN_QLIMIT */
                LASSERT(oqctl->qc_dqblk.dqb_valid == QIF_BLIMITS);
                LASSERT(oqctl->qc_dqblk.dqb_bsoftlimit == 0);

                /* zero hard limit: nothing to write, just adjust the lqs */
                if (!oqctl->qc_dqblk.dqb_bhardlimit)
                        goto adjust;

                /* There might be a pending dqacq/dqrel (which is going to
                 * clear stale limits on slave). we should wait for it's
                 * completion then initialize limits */
                handle = quota_barrier(&obd->u.obt.obt_qctxt, oqctl, 1);
                LASSERT(oqctl->qc_dqblk.dqb_bhardlimit == MIN_QLIMIT);
                push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                rc = fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);

                /* Update on-disk quota, in case of lose the changed limits
                 * (MIN_QLIMIT) on crash, which cannot be recovered.*/
                if (!rc) {
                        oqctl->qc_cmd = Q_SYNC;
                        fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);
                        oqctl->qc_cmd = Q_INITQUOTA;
                }
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                quota_unbarrier(handle);

                if (rc)
                        RETURN(rc);
adjust:
                /* create (last arg = 1) the lqs if it does not exist yet */
                lqs = quota_search_lqs(LQS_KEY(oqctl->qc_type, oqctl->qc_id),
                                       qctxt, 1);
                if (lqs == NULL || IS_ERR(lqs)){
                        CERROR("fail to create lqs during setquota operation "
                               "for %sid %u\n", oqctl->qc_type ? "g" : "u",
                               oqctl->qc_id);
                        break;
                } else {
                        lqs->lqs_flags |= QB_SET;
                        lqs_putref(lqs);
                }

                /* Trigger qunit pre-acquire */
                if (oqctl->qc_type == USRQUOTA)
                        id[USRQUOTA] = oqctl->qc_id;
                else
                        id[GRPQUOTA] = oqctl->qc_id;

                rc = qctxt_adjust_qunit(obd, &obd->u.obt.obt_qctxt,
                                        id, 1, 0, NULL);
                /* -EDQUOT/-EBUSY from pre-acquire are expected and benign */
                if (rc == -EDQUOT || rc == -EBUSY) {
                        CDEBUG(D_QUOTA, "rc: %d.\n", rc);
                        rc = 0;
                }
                break;
                }
        default:
                CERROR("%s: unsupported filter_quotactl command: %d\n",
                       obd->obd_name, oqctl->qc_cmd);
                RETURN(-EFAULT);
        }
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);

        RETURN(rc);
}
int quota_adjust_slave_lqs(struct quota_adjust_qunit *oqaq, struct lustre_quota_ctxt *qctxt) { struct lustre_qunit_size *lqs = NULL; unsigned long *unit, *tune; signed long tmp = 0; cfs_time_t time_limit = 0, *shrink; int i, rc = 0; ENTRY; LASSERT(qctxt); lqs = quota_search_lqs(LQS_KEY(QAQ_IS_GRP(oqaq), oqaq->qaq_id), qctxt, QAQ_IS_CREATE_LQS(oqaq) ? 1 : 0); if (lqs == NULL || IS_ERR(lqs)){ CERROR("fail to find a lqs for %sid %u!\n", QAQ_IS_GRP(oqaq) ? "g" : "u", oqaq->qaq_id); RETURN(PTR_ERR(lqs)); } CDEBUG(D_QUOTA, "before: bunit: %lu, iunit: %lu.\n", lqs->lqs_bunit_sz, lqs->lqs_iunit_sz); cfs_spin_lock(&lqs->lqs_lock); for (i = 0; i < 2; i++) { if (i == 0 && !QAQ_IS_ADJBLK(oqaq)) continue; if (i == 1 && !QAQ_IS_ADJINO(oqaq)) continue; tmp = i ? (lqs->lqs_iunit_sz - oqaq->qaq_iunit_sz) : (lqs->lqs_bunit_sz - oqaq->qaq_bunit_sz); shrink = i ? &lqs->lqs_last_ishrink : &lqs->lqs_last_bshrink; time_limit = cfs_time_add(i ? lqs->lqs_last_ishrink : lqs->lqs_last_bshrink, cfs_time_seconds(qctxt->lqc_switch_seconds)); unit = i ? &lqs->lqs_iunit_sz : &lqs->lqs_bunit_sz; tune = i ? &lqs->lqs_itune_sz : &lqs->lqs_btune_sz; /* quota master shrinks */ if (qctxt->lqc_handler && tmp > 0) *shrink = cfs_time_current(); /* quota master enlarges */ if (qctxt->lqc_handler && tmp < 0) { /* in case of ping-pong effect, don't enlarge lqs * in a short time */ if (*shrink && cfs_time_before(cfs_time_current(), time_limit)) tmp = 0; } /* when setquota, don't enlarge lqs b=18616 */ if (QAQ_IS_CREATE_LQS(oqaq) && tmp < 0) tmp = 0; if (tmp != 0) { *unit = i ? oqaq->qaq_iunit_sz : oqaq->qaq_bunit_sz; *tune = (*unit) / 2; } if (tmp > 0) rc |= i ? LQS_INO_DECREASE : LQS_BLK_DECREASE; if (tmp < 0) rc |= i ? LQS_INO_INCREASE : LQS_BLK_INCREASE; } cfs_spin_unlock(&lqs->lqs_lock); CDEBUG(D_QUOTA, "after: bunit: %lu, iunit: %lu.\n", lqs->lqs_bunit_sz, lqs->lqs_iunit_sz); lqs_putref(lqs); RETURN(rc); }
/**
 * when a block_write or inode_create rpc is finished, adjust the record for
 * pending blocks and inodes
 *
 * For each quota type with a non-zero pending count, subtract the count
 * from the lqs' bwrite/iwrite pending accounting under lqs_lock, then drop
 * TWO lqs references: one for the quota_search_lqs done here, and one for
 * the extra reference taken by quota_check_common (b=14784).  Elapsed time
 * is recorded in the LQUOTA_WAIT_FOR_COMMIT_{BLK,INO} lprocfs counter.
 *
 * \param obd      obd device whose quota context is updated
 * \param id       per-type id array indexed by quota type
 * \param pending  per-type pending amounts previously registered by
 *                 quota_check_common (bytes for blocks, count for inodes)
 * \param isblk    non-zero for block writes, zero for inode creates
 *
 * \retval 0 always (lookup failures are logged and skipped)
 */
static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
                                int pending[], int isblk)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int i;
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
        /* qd_flags encodes the quota type in the low bits; guard against
         * MAXQUOTAS growing beyond what that encoding can hold */
        CLASSERT(MAXQUOTAS < 4);
        if (!ll_sb_any_quota_active(qctxt->lqc_sb))
                RETURN(0);

        cfs_gettimeofday(&work_start);
        for (i = 0; i < MAXQUOTAS; i++) {
                struct lustre_qunit_size *lqs = NULL;

                LASSERT(pending[i] >= 0);
                if (pending[i] == 0)
                        continue;

                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                /* root user is never limited: nothing was accounted for it */
                if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
                        continue;

                lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
                if (lqs == NULL || IS_ERR(lqs)) {
                        /* the extra reference from quota_check_common cannot
                         * be dropped if the lqs is gone, hence the warning */
                        CERROR("can not find lqs for pending_commit: "
                               "[id %u] [%c] [pending %u] [isblk %d] (rc %ld), "
                               "maybe cause unexpected lqs refcount error!\n",
                               id[i], i ? 'g': 'u', pending[i], isblk,
                               lqs ? PTR_ERR(lqs) : -1);
                        continue;
                }

                cfs_spin_lock(&lqs->lqs_lock);
                if (isblk) {
                        LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
                                 "there are too many blocks! [id %u] [%c] "
                                 "[bwrite_pending %lu] [pending %u]\n",
                                 id[i], i % 2 ? 'g' : 'u',
                                 lqs->lqs_bwrite_pending, pending[i]);
                        lqs->lqs_bwrite_pending -= pending[i];
                } else {
                        LASSERTF(lqs->lqs_iwrite_pending >= pending[i],
                                 "there are too many files! [id %u] [%c] "
                                 "[iwrite_pending %lu] [pending %u]\n",
                                 id[i], i % 2 ? 'g' : 'u',
                                 lqs->lqs_iwrite_pending, pending[i]);
                        lqs->lqs_iwrite_pending -= pending[i];
                }
                CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
                       obd->obd_name,
                       isblk ? lqs->lqs_bwrite_pending :
                               lqs->lqs_iwrite_pending,
                       i, pending[i], isblk);
                cfs_spin_unlock(&lqs->lqs_lock);

                /* for quota_search_lqs in pending_commit */
                lqs_putref(lqs);
                /* for quota_search_lqs in quota_check */
                lqs_putref(lqs);
        }
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats,
                            isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
                                    LQUOTA_WAIT_FOR_COMMIT_INO,
                            timediff);

        RETURN(0);
}
/** * check whether the left quota of certain uid and gid can satisfy a block_write * or inode_create rpc. When need to acquire quota, return QUOTA_RET_ACQUOTA */ static int quota_check_common(struct obd_device *obd, const unsigned int id[], int pending[], int count, int cycle, int isblk, struct inode *inode, int frags) { struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt; int i; struct qunit_data qdata[MAXQUOTAS]; int mb = 0; int rc = 0, rc2[2] = { 0, 0 }; ENTRY; cfs_spin_lock(&qctxt->lqc_lock); if (!qctxt->lqc_valid){ cfs_spin_unlock(&qctxt->lqc_lock); RETURN(rc); } cfs_spin_unlock(&qctxt->lqc_lock); for (i = 0; i < MAXQUOTAS; i++) { struct lustre_qunit_size *lqs = NULL; qdata[i].qd_id = id[i]; qdata[i].qd_flags = i; if (isblk) QDATA_SET_BLK(&qdata[i]); qdata[i].qd_count = 0; /* ignore root user */ if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i])) continue; lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0); if (lqs == NULL || IS_ERR(lqs)) continue; if (IS_ERR(lqs)) { CERROR("can not find lqs for check_common: " "[id %u] [%c] [isblk %d] [count %d] [rc %ld]\n", id[i], i % 2 ? 'g': 'u', isblk, count, PTR_ERR(lqs)); RETURN(PTR_ERR(lqs)); } rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk); cfs_spin_lock(&lqs->lqs_lock); if (!cycle) { if (isblk) { pending[i] = count * CFS_PAGE_SIZE; /* in order to complete this write, we need extra * meta blocks. This function can get it through * data needed to be written b=16542 */ if (inode) { mb = pending[i]; rc = fsfilt_get_mblk(obd, qctxt->lqc_sb, &mb, inode, frags); if (rc) CERROR("%s: can't get extra " "meta blocks\n", obd->obd_name); else pending[i] += mb; } lqs->lqs_bwrite_pending += pending[i]; } else { pending[i] = count; lqs->lqs_iwrite_pending += pending[i]; } } /* if xx_rec < 0, that means quota are releasing, * and it may return before we use quota. 
So if * we find this situation, we assuming it has * returned b=18491 */ if (isblk && lqs->lqs_blk_rec < 0) { if (qdata[i].qd_count < -lqs->lqs_blk_rec) qdata[i].qd_count = 0; else qdata[i].qd_count += lqs->lqs_blk_rec; } if (!isblk && lqs->lqs_ino_rec < 0) { if (qdata[i].qd_count < -lqs->lqs_ino_rec) qdata[i].qd_count = 0; else qdata[i].qd_count += lqs->lqs_ino_rec; } CDEBUG(D_QUOTA, "[id %u] [%c] [isblk %d] [count %d]" " [lqs pending: %lu] [qd_count: "LPU64"] [metablocks: %d]" " [pending: %d]\n", id[i], i % 2 ? 'g': 'u', isblk, count, isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending, qdata[i].qd_count, mb, pending[i]); if (rc2[i] == QUOTA_RET_OK) { if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending) rc2[i] = QUOTA_RET_ACQUOTA; if (!isblk && qdata[i].qd_count < lqs->lqs_iwrite_pending) rc2[i] = QUOTA_RET_ACQUOTA; } cfs_spin_unlock(&lqs->lqs_lock); if (lqs->lqs_blk_rec < 0 && qdata[i].qd_count < lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb) OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5); /* When cycle is zero, lqs_*_pending will be changed. We will * get reference of the lqs here and put reference of lqs in * quota_pending_commit b=14784 */ if (!cycle) lqs_getref(lqs); /* this is for quota_search_lqs */ lqs_putref(lqs); } if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA) RETURN(QUOTA_RET_ACQUOTA); else RETURN(rc); }
/**
 * Set the per-type "no more quota" flags in \a oa for the object's owner.
 *
 * For each quota type, first consult the cached lqs: if its block unit has
 * shrunk to the sync threshold the id is close to its limit, so the flag is
 * set without touching disk.  Otherwise fall back to an on-disk Q_GETQUOTA
 * and set the flag when usage has reached the block hard limit.
 *
 * \param obd  obd device (OST/filter) to query
 * \param oa   obdo carrying o_uid/o_gid in and OBD_FL_NO_{USR,GRP}QUOTA +
 *             OBD_MD_FL{USR,GRP}QUOTA validity bits out
 *
 * \retval 0 on success, -ENOMEM if the oqctl buffer cannot be allocated,
 *         or the first lqs-lookup/fsfilt error encountered
 */
static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        int err, cnt, rc = 0;
        struct obd_quotactl *oqctl;
        ENTRY;

        if (!ll_sb_any_quota_active(obt->obt_sb))
                RETURN(0);

        OBD_ALLOC_PTR(oqctl);
        if (!oqctl)
                RETURN(-ENOMEM);

        /* set over quota flags for a uid/gid */
        oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
        oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                struct lustre_qunit_size *lqs = NULL;

                lqs = quota_search_lqs(LQS_KEY(cnt, GET_OA_ID(cnt, oa)),
                                       qctxt, 0);
                if (lqs == NULL || IS_ERR(lqs)) {
                        /* NOTE(review): PTR_ERR(NULL) is 0, so a missing
                         * (not erroneous) lqs breaks out of the loop with
                         * rc 0 and silently skips the remaining quota
                         * types and the on-disk check — confirm this
                         * "no lqs yet means nothing to flag" assumption */
                        rc = PTR_ERR(lqs);
                        if (rc)
                                CDEBUG(D_QUOTA, "search lqs for %s %d failed, "
                                       "(rc = %d)\n",
                                       cnt == USRQUOTA ? "user" : "group",
                                       cnt == USRQUOTA ? oa->o_uid : oa->o_gid,
                                       rc);
                        break;
                } else {
                        cfs_spin_lock(&lqs->lqs_lock);

                        /* bunit shrunk to the sync threshold: the id is
                         * nearly out of quota, flag it without a disk read */
                        if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
                                oa->o_flags |= (cnt == USRQUOTA) ?
                                        OBD_FL_NO_USRQUOTA :
                                        OBD_FL_NO_GRPQUOTA;
                                cfs_spin_unlock(&lqs->lqs_lock);
                                CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
                                       "sync_blk(%d)\n", lqs->lqs_bunit_sz,
                                       qctxt->lqc_sync_blk);
                                /* this is for quota_search_lqs */
                                lqs_putref(lqs);
                                continue;
                        }

                        cfs_spin_unlock(&lqs->lqs_lock);
                        /* this is for quota_search_lqs */
                        lqs_putref(lqs);
                }

                /* fall back to the authoritative on-disk usage/limits */
                memset(oqctl, 0, sizeof(*oqctl));
                oqctl->qc_cmd = Q_GETQUOTA;
                oqctl->qc_type = cnt;
                oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
                err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
                if (err) {
                        if (!rc)
                                rc = err;
                        /* mark this type's flag as not valid in the reply */
                        oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
                                                             OBD_MD_FLGRPQUOTA);
                        CDEBUG(D_QUOTA, "fsfilt getquota for %s %d failed, "
                               "(rc = %d)\n",
                               cnt == USRQUOTA ? "user" : "group",
                               cnt == USRQUOTA ? oa->o_uid : oa->o_gid, err);
                        continue;
                }

                if (oqctl->qc_dqblk.dqb_bhardlimit &&
                    (toqb(oqctl->qc_dqblk.dqb_curspace) >=
                     oqctl->qc_dqblk.dqb_bhardlimit)) {
                        oa->o_flags |= (cnt == USRQUOTA) ?
                                OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
                        CDEBUG(D_QUOTA, "out of quota for %s %d\n",
                               cnt == USRQUOTA ? "user" : "group",
                               cnt == USRQUOTA ? oa->o_uid : oa->o_gid);
                }
        }
        OBD_FREE_PTR(oqctl);
        RETURN(rc);
}