Example #1
static dmu_buf_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
			    struct lu_attr *la, uint64_t parent,
			    struct osd_thandle *oh)
{
	dmu_buf_t	  *db;
	int		   rc;
	struct osd_device *osd = osd_obj2dev(obj);

	LASSERT(S_ISREG(la->la_mode));
	rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
	if (rc)
		return ERR_PTR(rc);

	/*
	 * XXX: This heuristic is non-optimal.  It would be better to
	 * increase the blocksize up to osd->od_max_blksz during the write.
	 * This is exactly how the ZPL behaves and it ensures that the right
	 * blocksize is selected based on the file size rather than making
	 * broad assumptions based on the osd type.
	 */
	if (!lu_device_is_md(osd2lu_dev(osd))) {
		rc = -dmu_object_set_blocksize(osd->od_os, db->db_object,
					       osd->od_max_blksz, 0, oh->ot_tx);
		if (unlikely(rc)) {
			CERROR("%s: can't change blocksize: %d\n",
			       osd->od_svname, rc);
			return ERR_PTR(rc);
		}
	}

	return db;
}
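
The XXX comment above describes the preferred alternative: grow the blocksize
on demand during writes, up to od_max_blksz, the way the ZPL does, instead of
forcing a choice at create time.  Below is a minimal sketch of such a helper,
assuming it would be called from the write path; osd_grow_blksz() itself is
hypothetical, while dmu_object_info_from_db() and dmu_object_set_blocksize()
are existing DMU interfaces.

/*
 * Hypothetical helper (sketch only): grow the dnode blocksize up to
 * od_max_blksz once a write extends past the current blocksize, mimicking
 * the ZPL behaviour mentioned in the XXX comment above.
 */
static int osd_grow_blksz(struct osd_device *osd, dmu_buf_t *db,
			  uint64_t end, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	uint64_t	  blksz;

	dmu_object_info_from_db(db, &doi);
	blksz = doi.doi_data_block_size;

	/* nothing to do if the object already uses the maximum blocksize
	 * or the write still fits within the current one */
	if (blksz >= osd->od_max_blksz || end <= blksz)
		return 0;

	/* double the blocksize until it covers the write, capped at the
	 * maximum configured for this OSD */
	while (blksz < end && blksz < osd->od_max_blksz)
		blksz <<= 1;
	if (blksz > osd->od_max_blksz)
		blksz = osd->od_max_blksz;

	/* may fail once the object already has more than one block; a real
	 * implementation would have to handle that case */
	return -dmu_object_set_blocksize(osd->od_os, db->db_object,
					 blksz, 0, tx);
}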
Example #2
static dmu_buf_t *osd_mkreg(const struct lu_env *env, struct osd_device *osd,
			    struct lu_attr *la, struct osd_thandle *oh)
{
	dmu_buf_t *db;
	int	    rc;

	LASSERT(S_ISREG(la->la_mode));
	rc = __osd_object_create(env, &osd->od_objset, &db, oh->ot_tx, la,
				 osd_obj_tag);
	if (rc)
		return ERR_PTR(rc);

	/*
	 * XXX: a hack to let OST objects use a bigger blocksize.  We need
	 * a method in the OSD API to control this from OFD/MDD.
	 */
	if (!lu_device_is_md(osd2lu_dev(osd))) {
		rc = -dmu_object_set_blocksize(osd->od_objset.os,
					       db->db_object,
					       128 << 10, 0, oh->ot_tx);
		if (unlikely(rc)) {
			CERROR("%s: can't change blocksize: %d\n",
			       osd->od_svname, rc);
			return ERR_PTR(rc);
		}
	}

	return db;
}
Example #3
static dmu_buf_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
			    struct lu_attr *la, uint64_t parent,
			    struct osd_thandle *oh)
{
	dmu_buf_t	  *db;
	int		   rc;
	struct osd_device *osd = osd_obj2dev(obj);

	LASSERT(S_ISREG(la->la_mode));
	rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
	if (rc)
		return ERR_PTR(rc);

	if (!lu_device_is_md(osd2lu_dev(osd))) {
		/* use 4K as the default blocksize because clients write data
		 * in page-sized chunks, and the page size is at least 4K */
		rc = -dmu_object_set_blocksize(osd->od_os, db->db_object,
					       4096, 0, oh->ot_tx);
		if (unlikely(rc)) {
			CERROR("%s: can't change blocksize: %d\n",
			       osd->od_svname, rc);
			return ERR_PTR(rc);
		}
	}

	return db;
}
Example #4
/*
 * Initialize on-disk structures in order to manage quota enforcement for
 * the target associated with the qsd instance \a qsd and start the
 * reintegration procedure for each quota type as soon as possible.
 * The last step of the reintegration will be completed once qsd_start() is
 * called, at which point the space reconciliation with the master will be
 * executed.
 * This function must be called when the server stack is fully configured,
 * typically when ->ldo_prepare is called across the stack.
 *
 * \param env - the environment passed by the caller
 * \param qsd - is qsd_instance to prepare
 *
 * \retval - 0 on success, appropriate error on failure
 */
int qsd_prepare(const struct lu_env *env, struct qsd_instance *qsd)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	int			 qtype, rc = 0;
	ENTRY;

	if (unlikely(qsd == NULL))
		RETURN(0);

	read_lock(&qsd->qsd_lock);
	if (qsd->qsd_prepared) {
		CERROR("%s: qsd instance already prepared\n", qsd->qsd_svname);
		rc = -EALREADY;
	}
	read_unlock(&qsd->qsd_lock);
	if (rc)
		RETURN(rc);

	/* Record whether this qsd instance is managing quota enforcement for
	 * an MDT (i.e. inode quota) or an OST (block quota) */
	if (lu_device_is_md(qsd->qsd_dev->dd_lu_dev.ld_site->ls_top_dev)) {
		qsd->qsd_is_md = true;
		qsd->qsd_sync_threshold = LQUOTA_LEAST_QUNIT(LQUOTA_RES_MD);
	} else {
		qsd->qsd_sync_threshold = LQUOTA_LEAST_QUNIT(LQUOTA_RES_DT);
	}

	/* look up the on-disk directory for the quota slave */
	qsd->qsd_root = lquota_disk_dir_find_create(env, qsd->qsd_dev, NULL,
						    QSD_DIR);
	if (IS_ERR(qsd->qsd_root)) {
		rc = PTR_ERR(qsd->qsd_root);
		qsd->qsd_root = NULL;
		CERROR("%s: failed to create quota slave root dir (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	/* initialize per-quota type data */
	for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
		rc = qsd_qtype_init(env, qsd, qtype);
		if (rc)
			RETURN(rc);
	}

	/* pools successfully set up, mark the qsd as prepared */
	write_lock(&qsd->qsd_lock);
	qsd->qsd_prepared = true;
	write_unlock(&qsd->qsd_lock);

	/* start reintegration thread for each type, if required */
	for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
		struct qsd_qtype_info	*qqi = qsd->qsd_type_array[qtype];

		if (qsd_type_enabled(qsd, qtype) && qsd->qsd_acct_failed) {
			LCONSOLE_ERROR("%s: can't enable quota enforcement "
				       "since space accounting isn't functional"
				       ". Please run tunefs.lustre --quota on "
				       "an unmounted filesystem if not done "
				       "already\n", qsd->qsd_svname);
			break;
		}

		rc = qsd_start_reint_thread(qqi);
		if (rc) {
			CERROR("%s: failed to start reint thread for type %s "
			       "(%d)\n", qsd->qsd_svname, QTYPE_NAME(qtype),
			       rc);
			RETURN(rc);
		}
	}

	/* start writeback thread */
	rc = qsd_start_upd_thread(qsd);
	if (rc) {
		CERROR("%s: failed to start writeback thread (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	/* generate osp name */
	rc = tgt_name2lwp_name(qsd->qsd_svname, qti->qti_buf,
			       MTI_NAME_MAXLEN, 0);
	if (rc) {
		CERROR("%s: failed to generate ospname (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	/* the connection callback will start the reintegration
	 * procedure if quota is enabled */
	rc = lustre_register_lwp_item(qti->qti_buf, &qsd->qsd_exp,
				      qsd_conn_callback, (void *)qsd);
	if (rc) {
		CERROR("%s: fail to get connection to master (%d)\n",
		       qsd->qsd_svname, rc);
		RETURN(rc);
	}

	RETURN(0);
}
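
The header comment above spells out an ordering contract: qsd_prepare() sets
up the on-disk structures and kicks off reintegration, but the final step only
happens once qsd_start() is called on a fully configured stack.  The sketch
below illustrates that lifecycle from an OSD setup path; osd_qsd_setup() is
hypothetical and the qsd_init() prototype is an assumption, only the
prepare-then-start ordering comes from the comment above.

/*
 * Hedged sketch of the quota slave lifecycle around qsd_prepare().
 * osd_qsd_setup() is made up and the qsd_init() arguments are assumed;
 * error cleanup (qsd_fini()) is omitted for brevity.
 */
static int osd_qsd_setup(const struct lu_env *env, struct dt_device *dev,
			 char *svname)
{
	struct qsd_instance *qsd;
	int		     rc;

	/* 1. allocate the qsd instance (assumed arguments: target name,
	 * backing dt_device, procfs parent) */
	qsd = qsd_init(env, svname, dev, NULL);
	if (IS_ERR(qsd))
		return PTR_ERR(qsd);

	/* 2. set up on-disk structures and start the reintegration threads
	 * once the server stack is fully configured (->ldo_prepare time) */
	rc = qsd_prepare(env, qsd);
	if (rc)
		return rc;

	/* 3. complete reintegration and enable enforcement when the target
	 * starts serving requests */
	return qsd_start(env, qsd);
}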
/*
 * Helper routine to retrieve slave information.
 * This function converts a quotactl request into quota/accounting object
 * operations. It is independent of the slave stack which is only accessible
 * from the OSD layer.
 *
 * \param env   - is the environment passed by the caller
 * \param dev   - is the dt_device this quotactl is executed on
 * \param oqctl - is the quotactl request
 */
int lquotactl_slv(const struct lu_env *env, struct dt_device *dev,
		  struct obd_quotactl *oqctl)
{
	struct lquota_thread_info	*qti = lquota_info(env);
	__u64				 key;
	struct dt_object		*obj;
	struct obd_dqblk		*dqblk = &oqctl->qc_dqblk;
	int				 rc;
	ENTRY;

	if (oqctl->qc_cmd != Q_GETOQUOTA) {
		/* as in many other places, dev->dd_lu_dev.ld_obd->obd_name
		 * points to an invalid obd_name, to be fixed in LU-1574 */
		CERROR("%s: Unsupported quotactl command: %x\n",
		       dev->dd_lu_dev.ld_obd->obd_name, oqctl->qc_cmd);
		RETURN(-EOPNOTSUPP);
	}

	if (oqctl->qc_type < 0 || oqctl->qc_type >= LL_MAXQUOTAS)
		RETURN(-EOPNOTSUPP);

	/* qc_id is a 32-bit field while a key has 64 bits */
	key = oqctl->qc_id;

	/* Step 1: collect accounting information */

	obj = acct_obj_lookup(env, dev, oqctl->qc_type);
	if (IS_ERR(obj))
		RETURN(-EOPNOTSUPP);
	if (obj->do_index_ops == NULL)
		GOTO(out, rc = -EINVAL);

	/* lookup record storing space accounting information for this ID */
	rc = dt_lookup(env, obj, (struct dt_rec *)&qti->qti_acct_rec,
		       (struct dt_key *)&key);
	if (rc < 0)
		GOTO(out, rc);

	memset(&oqctl->qc_dqblk, 0, sizeof(struct obd_dqblk));
	dqblk->dqb_curspace	= qti->qti_acct_rec.bspace;
	dqblk->dqb_curinodes	= qti->qti_acct_rec.ispace;
	dqblk->dqb_valid	= QIF_USAGE;

	dt_object_put(env, obj);

	/* Step 2: collect enforcement information */

	obj = quota_obj_lookup(env, dev, oqctl->qc_type);
	if (IS_ERR(obj))
		RETURN(0);
	if (obj->do_index_ops == NULL)
		GOTO(out, rc = 0);

	memset(&qti->qti_slv_rec, 0, sizeof(qti->qti_slv_rec));
	/* lookup record storing enforcement information for this ID */
	rc = dt_lookup(env, obj, (struct dt_rec *)&qti->qti_slv_rec,
		       (struct dt_key *)&key);
	if (rc < 0 && rc != -ENOENT)
		GOTO(out, rc = 0);

	if (lu_device_is_md(dev->dd_lu_dev.ld_site->ls_top_dev)) {
		dqblk->dqb_ihardlimit = qti->qti_slv_rec.qsr_granted;
		dqblk->dqb_bhardlimit = 0;
	} else {
		dqblk->dqb_ihardlimit = 0;
		dqblk->dqb_bhardlimit = qti->qti_slv_rec.qsr_granted;
	}
	dqblk->dqb_valid |= QIF_LIMITS;

	GOTO(out, rc = 0);
out:
	dt_object_put(env, obj);
	return rc;
}
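
Since lquotactl_slv() only handles Q_GETOQUOTA, a caller simply fills in the
command, quota type and ID, and reads usage and limits back from qc_dqblk.
A hedged usage sketch follows; osd_print_usage() is made up for illustration,
while the obd_quotactl fields and the Q_GETOQUOTA command come from the code
above.

/*
 * Hedged usage sketch: query per-ID usage on a slave through
 * lquotactl_slv().  osd_print_usage() is hypothetical.
 */
static int osd_print_usage(const struct lu_env *env, struct dt_device *dev,
			   __u32 id)
{
	struct obd_quotactl oqctl;
	int		    rc;

	memset(&oqctl, 0, sizeof(oqctl));
	oqctl.qc_cmd  = Q_GETOQUOTA;	/* only command lquotactl_slv accepts */
	oqctl.qc_type = USRQUOTA;	/* group quota works the same way */
	oqctl.qc_id   = id;		/* 32-bit ID, widened to a 64-bit key */

	rc = lquotactl_slv(env, dev, &oqctl);
	if (rc == 0)
		CDEBUG(D_QUOTA, "id %u: %llu bytes, %llu inodes, valid %x\n",
		       id, (unsigned long long)oqctl.qc_dqblk.dqb_curspace,
		       (unsigned long long)oqctl.qc_dqblk.dqb_curinodes,
		       oqctl.qc_dqblk.dqb_valid);
	return rc;
}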