Example #1
/**
 * Try to consume local quota space.
 *
 * \param lqe   - is the qid entry to be processed
 * \param space - is the amount of quota space needed to complete the operation
 *
 * \retval 0       - success
 * \retval -EDQUOT - out of quota
 * \retval -EAGAIN - need to acquire space from master
 */
static int qsd_acquire_local(struct lquota_entry *lqe, __u64 space)
{
	__u64	usage;
	int	rc;
	ENTRY;

	if (!lqe->lqe_enforced)
		/* not enforced any more, we are good */
		RETURN(-ESRCH);

	lqe_write_lock(lqe);
	/* use latest usage */
	usage = lqe->lqe_usage;
	/* take pending write into account */
	usage += lqe->lqe_pending_write;

	if (space + usage <= lqe->lqe_granted - lqe->lqe_pending_rel) {
		/* Yay! we got enough space */
		lqe->lqe_pending_write += space;
		lqe->lqe_waiting_write -= space;
		rc = 0;
	/* lqe_edquot flag is used to avoid flooding dqacq requests when
	 * the user is over quota, however, the lqe_edquot could be stale
	 * sometimes due to the race reply of dqacq vs. id lock glimpse
	 * (see LU-4505), so we revalidate it every 5 seconds. */
	} else if (lqe->lqe_edquot &&
		   cfs_time_before_64(cfs_time_shift_64(-5),
				      lqe->lqe_edquot_time)) {
		rc = -EDQUOT;
	} else {
		rc = -EAGAIN;
	}
	lqe_write_unlock(lqe);

	RETURN(rc);
}
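The heart of qsd_acquire_local() is the three-way decision between consuming the local grant, trusting a recently cached EDQUOT, and falling back to the master. Below is a minimal userspace sketch of that logic; struct qent, try_consume() and the use of time() are illustrative stand-ins for struct lquota_entry and the cfs_time_* helpers, not Lustre API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for struct lquota_entry, reduced to the fields
 * that qsd_acquire_local() actually reads and writes. */
struct qent {
	uint64_t usage;          /* lqe_usage */
	uint64_t pending_write;  /* lqe_pending_write */
	uint64_t waiting_write;  /* lqe_waiting_write */
	uint64_t granted;        /* lqe_granted */
	uint64_t pending_rel;    /* lqe_pending_rel */
	int      edquot;         /* lqe_edquot */
	time_t   edquot_time;    /* lqe_edquot_time */
};

/* Models the three-way decision above, minus the locking:
 * 0 when the grant covers the request, -EDQUOT while the cached
 * over-quota flag is still fresh, -EAGAIN when the caller should
 * acquire more space from the master. */
static int try_consume(struct qent *q, uint64_t space)
{
	uint64_t usage = q->usage + q->pending_write;

	if (space + usage <= q->granted - q->pending_rel) {
		q->pending_write += space;
		q->waiting_write -= space;
		return 0;
	}
	/* trust a cached EDQUOT for at most 5 seconds (cf. LU-4505) */
	if (q->edquot && time(NULL) - q->edquot_time < 5)
		return -EDQUOT;
	return -EAGAIN;
}

int main(void)
{
	struct qent q = { .usage = 90, .waiting_write = 16, .granted = 100 };

	printf("%d\n", try_consume(&q, 8)); /* 0: fits within the grant */
	printf("%d\n", try_consume(&q, 8)); /* -EAGAIN: ask the master */
	return 0;
}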
Example #2
/**
 * Get file system statistics of OST server.
 *
 * Helper function for ofd_statfs(), also used by grant code.
 * Implements caching for statistics to avoid calling OSD device each time.
 *
 * \param[in]  env	  execution environment
 * \param[in]  ofd	  OFD device
 * \param[out] osfs	  statistic data to return
 * \param[in]  max_age	  maximum age for cached data
 * \param[out] from_cache show whether data was read from the cache
 *
 * \retval		0 if successful
 * \retval		negative value on error
 */
int ofd_statfs_internal(const struct lu_env *env, struct ofd_device *ofd,
			struct obd_statfs *osfs, __u64 max_age, int *from_cache)
{
	int rc = 0;
	ENTRY;

	down_read(&ofd->ofd_lastid_rwsem);
	/* Currently, to be safe, we do not distinguish which LAST_ID is
	 * broken; we may do that in the future.
	 * Return -ENOSPC until the LAST_ID is rebuilt. */
	if (unlikely(ofd->ofd_lastid_rebuilding))
		GOTO(out, rc = -ENOSPC);

	spin_lock(&ofd->ofd_osfs_lock);
	if (cfs_time_before_64(ofd->ofd_osfs_age, max_age) || max_age == 0) {
		u64 unstable;

		/* statfs data are too old, get an up-to-date copy.
		 * We must be cautious here since multiple threads might
		 * try to update the statfs data concurrently and we must
		 * guarantee that the cached statfs data stay consistent. */

		if (ofd->ofd_statfs_inflight == 0)
			/* clear inflight counter if no users, although it would
			 * take a while to overflow this 64-bit counter ... */
			ofd->ofd_osfs_inflight = 0;
		/* notify ofd_grant_commit() that we want to track writes
		 * completed as of now */
		ofd->ofd_statfs_inflight++;
		/* record value of inflight counter before running statfs to
		 * compute the diff once statfs is completed */
		unstable = ofd->ofd_osfs_inflight;
		spin_unlock(&ofd->ofd_osfs_lock);

		/* statfs can sleep ... hopefully not for too long since we can
		 * call it fairly often as space fills up */
		rc = dt_statfs(env, ofd->ofd_osd, osfs);
		if (unlikely(rc))
			GOTO(out, rc);

		spin_lock(&ofd->ofd_grant_lock);
		spin_lock(&ofd->ofd_osfs_lock);
		/* calculate how much space was written while we released the
		 * ofd_osfs_lock */
		unstable = ofd->ofd_osfs_inflight - unstable;
		ofd->ofd_osfs_unstable = 0;
		if (unstable) {
			/* some writes completed while we were running statfs
			 * w/o the ofd_osfs_lock. Those ones got added to
			 * the cached statfs data that we are about to crunch.
			 * Take them into account in the new statfs data */
			osfs->os_bavail -= min_t(u64, osfs->os_bavail,
					       unstable >> ofd->ofd_blockbits);
			/* However, we don't really know if those writes got
			 * accounted in the statfs call, so tell
			 * ofd_grant_space_left() there is some uncertainty
			 * on the accounting of those writes.
			 * The purpose is to prevent spurious error messages in
			 * ofd_grant_space_left() since those writes might be
			 * accounted twice. */
			ofd->ofd_osfs_unstable += unstable;
		}
		/* similarly, there is some uncertainty on write requests
		 * between prepare & commit */
		ofd->ofd_osfs_unstable += ofd->ofd_tot_pending;
		spin_unlock(&ofd->ofd_grant_lock);

		/* finally update the cached statfs data */
		ofd->ofd_osfs = *osfs;
		ofd->ofd_osfs_age = cfs_time_current_64();

		ofd->ofd_statfs_inflight--; /* stop tracking */
		if (ofd->ofd_statfs_inflight == 0)
			ofd->ofd_osfs_inflight = 0;
		spin_unlock(&ofd->ofd_osfs_lock);

		if (from_cache)
			*from_cache = 0;
	} else {
		/* use cached statfs data */
		*osfs = ofd->ofd_osfs;
		spin_unlock(&ofd->ofd_osfs_lock);
		if (from_cache)
			*from_cache = 1;
	}
	GOTO(out, rc);

out:
	up_read(&ofd->ofd_lastid_rwsem);
	return rc;
}
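The subtle part of ofd_statfs_internal() is the snapshot-and-diff accounting: writes that commit while dt_statfs() runs outside ofd_osfs_lock are measured via ofd_osfs_inflight and flagged as "unstable" since they may already be reflected in the fresh statfs data. The standalone model below reproduces that bookkeeping; fs_cache, commit_write() and refresh() are hypothetical names, not the OFD API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduction of the OFD fields used by the caching logic. */
struct fs_cache {
	uint64_t osfs_inflight;   /* bytes committed while statfs runs */
	uint64_t statfs_inflight; /* threads currently inside refresh() */
	uint64_t osfs_unstable;   /* uncertainty reported to grant code */
	uint64_t bavail;          /* cached free blocks */
	unsigned blockbits;
};

/* Commit path: count bytes only while someone is tracking them. */
static void commit_write(struct fs_cache *c, uint64_t bytes)
{
	if (c->statfs_inflight)
		c->osfs_inflight += bytes;
}

/* Models the correction applied to fresh statfs results. */
static void refresh(struct fs_cache *c, uint64_t fresh_bavail)
{
	uint64_t before;

	c->statfs_inflight++;        /* ask commit_write() to track */
	before = c->osfs_inflight;   /* snapshot before "statfs" */

	/* ... lock dropped, dt_statfs() sleeps; a write commits here ... */
	commit_write(c, 1ULL << c->blockbits);

	/* Bytes that landed during statfs may or may not be reflected in
	 * the fresh data; we cannot tell, so subtract them from bavail
	 * and flag them as unstable (possibly counted twice). */
	c->osfs_unstable = c->osfs_inflight - before;
	c->bavail = fresh_bavail - (c->osfs_unstable >> c->blockbits);

	if (--c->statfs_inflight == 0)
		c->osfs_inflight = 0; /* no trackers left, reset */
}

int main(void)
{
	struct fs_cache c = { .blockbits = 12 }; /* 4 KiB blocks */

	refresh(&c, 1000);
	printf("bavail=%llu unstable=%llu\n",
	       (unsigned long long)c.bavail,
	       (unsigned long long)c.osfs_unstable); /* 999, 4096 */
	return 0;
}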
Example #3
/**
 * Acquire quota space from master.
 * There is at most one in-flight dqacq/dqrel request.
 *
 * \param env    - the environment passed by the caller
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0            - success
 * \retval -EDQUOT      - out of quota
 * \retval -EINPROGRESS - inform client to retry write/create
 * \retval -EBUSY       - already a quota request in flight
 * \retval -ve          - other appropriate errors
 */
static int qsd_acquire_remote(const struct lu_env *env,
			      struct lquota_entry *lqe)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct quota_body	*qbody = &qti->qti_body;
	struct qsd_instance	*qsd;
	struct qsd_qtype_info	*qqi;
	int			 rc;
	ENTRY;

	memset(qbody, 0, sizeof(*qbody));
	rc = qsd_ready(lqe, &qbody->qb_glb_lockh);
	if (rc)
		RETURN(rc);

	qqi = lqe2qqi(lqe);
	qsd = qqi->qqi_qsd;

	lqe_write_lock(lqe);

	/* is quota really enforced for this id? */
	if (!lqe->lqe_enforced) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "quota not enforced any more");
		RETURN(0);
	}

	/* fill qb_count & qb_flags */
	if (!qsd_calc_acquire(lqe, qbody)) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "No acquire required");
		RETURN(0);
	}

	/* check whether an acquire request completed recently */
	if (lqe->lqe_acq_rc != 0 &&
	    cfs_time_before_64(cfs_time_shift_64(-1), lqe->lqe_acq_time)) {
		lqe_write_unlock(lqe);
		LQUOTA_DEBUG(lqe, "using cached return code %d",
			     lqe->lqe_acq_rc);
		RETURN(lqe->lqe_acq_rc);
	}

	/* only 1 quota request in flight for a given ID is allowed */
	rc = qsd_request_enter(lqe);
	if (rc) {
		lqe_write_unlock(lqe);
		RETURN(rc);
	}

	lustre_handle_copy(&qti->qti_lockh, &lqe->lqe_lockh);
	lqe_write_unlock(lqe);

	/* hold a refcount until completion */
	lqe_getref(lqe);

	/* fill other quota body fields */
	qbody->qb_fid = qqi->qqi_fid;
	qbody->qb_id  = lqe->lqe_id;

	/* check whether we already own a valid lock for this ID */
	rc = qsd_id_lock_match(&qti->qti_lockh, &qbody->qb_lockh);
	if (rc) {
		struct lquota_lvb *lvb;

		OBD_ALLOC_PTR(lvb);
		if (lvb == NULL) {
			rc = -ENOMEM;
			qsd_req_completion(env, qqi, qbody, NULL,
					   &qti->qti_lockh, NULL, lqe, rc);
			RETURN(rc);
		}
		/* no lock found, should use intent */
		rc = qsd_intent_lock(env, qsd->qsd_exp, qbody, true,
				     IT_QUOTA_DQACQ, qsd_req_completion,
				     qqi, lvb, (void *)lqe);
	} else {
		/* lock found, should use regular dqacq */
		rc = qsd_send_dqacq(env, qsd->qsd_exp, qbody, true,
				    qsd_req_completion, qqi, &qti->qti_lockh,
				    lqe);
	}

	/* the completion function will be called by qsd_send_dqacq or
	 * qsd_intent_lock */
	RETURN(rc);
}
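Taken together, the two acquire paths are meant to be driven from a retry loop: consume from the local grant first and, on -EAGAIN, ask the master for more space before trying again. The sketch below shows one plausible shape for such a caller; the function name, retry bound and error handling are illustrative assumptions, not the actual qsd_acquire().

static int acquire_space(const struct lu_env *env,
			 struct lquota_entry *lqe, __u64 space)
{
	int rc, retry = 0;
	ENTRY;

	do {
		/* fast path: satisfy the request from the local grant */
		rc = qsd_acquire_local(lqe, space);
		if (rc != -EAGAIN)
			break; /* 0, -EDQUOT, -ESRCH or hard error */

		/* local grant exhausted, ask the master for more */
		rc = qsd_acquire_remote(env, lqe);
		if (rc == -EBUSY)
			/* a request is already in flight; a real caller
			 * would wait for its completion before retrying */
			break;
	} while (rc == 0 && ++retry < 10);

	if (rc == -ESRCH)
		/* quota enforcement was switched off under us */
		rc = 0;
	RETURN(rc);
}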