Example #1
0
/**
 * Implements cl_io_operations::cio_prepare_write() method for osc layer.
 *
 * \retval -EIO transfer initiated against this osc will most likely fail
 * \retval 0    transfer initiated against this osc will most likely succeed.
 *
 * The reason for this check is to immediately return an error to the caller
 * in the case of a deactivated import. Note that the import can be
 * deactivated later, while pages dirtied by this IO are still in the cache,
 * but this is irrelevant: an error would still be returned to the application
 * (if it does fsync), but many applications don't fsync because of
 * performance issues, so we want to return -EIO at write time to notify the
 * application.
 */
static int osc_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
	struct obd_import *imp = class_exp2cliimp(dev->od_exp);
	struct osc_io     *oio = cl2osc_io(env, ios);
	int result = 0;

	/*
	 * This implements OBD_BRW_CHECK logic from old client.
	 */

	if (imp == NULL || imp->imp_invalid)
		result = -EIO;
	if (result == 0 && oio->oi_lockless)
		/* this page contains `invalid' data, but who cares?
		 * nobody can access the invalid data.
	 * in osc_io_commit_write(), we're going to write exactly the
	 * [from, to) bytes of this page to the OST. -jay */
		cl_page_export(env, slice->cpl_page, 1);

	return result;
}
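
/*
 * A minimal usage sketch, not part of the original source: it only
 * illustrates the error-return contract described in the comment above.
 * A caller in the generic cl_io layer is assumed to dispatch to this method
 * and abort the write early when -EIO is returned; the wrapper name below is
 * hypothetical.
 */
static int osc_example_prepare_write_caller(const struct lu_env *env,
					    const struct cl_io_slice *ios,
					    const struct cl_page_slice *slice,
					    unsigned from, unsigned to)
{
	int result;

	/* returns -EIO if the import is NULL or invalid */
	result = osc_io_prepare_write(env, ios, slice, from, to);
	if (result != 0)
		return result;	/* fail the write at prepare time */

	/* otherwise the copy of user data into the page may proceed */
	return result;
}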
Example #2
0
int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
                   struct obd_quotactl *oqctl)
{
        struct client_obd       *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request   *req;
        struct obd_quotactl     *body;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                        &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
                                        OST_QUOTACHECK);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
        *body = *oqctl;

        ptlrpc_request_set_replen(req);

        /* the next poll will find -ENODATA, which means quotacheck is
         * still in progress */
        cli->cl_qchk_stat = -ENODATA;
        rc = ptlrpc_queue_wait(req);
        if (rc)
                cli->cl_qchk_stat = rc;
        ptlrpc_req_finished(req);
        RETURN(rc);
}
Example #3
0
/**
 * Check whether a qsd instance is all set to send quota request to master.
 * This includes checking whether:
 * - the connection to master is set up and usable,
 * - the qsd isn't stopping
 * - reintegration has been successfully completed and all indexes are
 *   up-to-date
 *
 * \param lqe - is the lquota entry for which we would like to send a quota
 *              request
 * \param lockh - is the remote handle of the global lock returned on success
 *
 * \retval 0 on success, appropriate error on failure
 */
static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
{
	struct qsd_qtype_info	*qqi = lqe2qqi(lqe);
	struct qsd_instance	*qsd = qqi->qqi_qsd;
	struct obd_import	*imp = NULL;
	struct ldlm_lock	*lock;
	ENTRY;

	read_lock(&qsd->qsd_lock);
	/* is the qsd about to shut down? */
	if (qsd->qsd_stopping) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "dropping quota req since qsd is stopping");
		/* Target is about to shut down, client will retry */
		RETURN(-EINPROGRESS);
	}

	/* is the connection to the quota master ready? */
	if (qsd->qsd_exp_valid)
		imp = class_exp2cliimp(qsd->qsd_exp);
	if (imp == NULL || imp->imp_invalid) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "connection to master not ready");
		RETURN(-ENOTCONN);
	}

	/* In most cases, reintegration has already been triggered (when quota
	 * is enabled or on OST start); however, in a rare race condition
	 * (enabling quota while OSTs are starting), we might miss triggering
	 * reintegration for some qqi.
	 *
	 * If the previous reintegration failed for some reason, we'll
	 * re-trigger it here as well. */
	if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) {
		read_unlock(&qsd->qsd_lock);
		LQUOTA_DEBUG(lqe, "not up-to-date, dropping request and "
			     "kicking off reintegration");
		qsd_start_reint_thread(qqi);
		RETURN(-EINPROGRESS);
	}

	/* Fill the remote global lock handle; the master will check this
	 * handle to see if the slave is sending a request with a stale lock */
	lustre_handle_copy(lockh, &qqi->qqi_lockh);
	read_unlock(&qsd->qsd_lock);

	if (!lustre_handle_is_used(lockh))
		RETURN(-ENOLCK);

	lock = ldlm_handle2lock(lockh);
	if (lock == NULL)
		RETURN(-ENOLCK);

	/* return remote lock handle to be packed in quota request */
	lustre_handle_copy(lockh, &lock->l_remote_handle);
	LDLM_LOCK_PUT(lock);

	RETURN(0);
}
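
/*
 * A minimal usage sketch, not part of the original source: a hypothetical
 * caller checks qsd_ready() before packing a quota request and simply
 * propagates -EINPROGRESS/-ENOTCONN so the operation can be retried once the
 * master is reachable. The function name is made up for illustration.
 */
static int qsd_example_try_acquire(struct lquota_entry *lqe)
{
	struct lustre_handle	lockh;
	int			rc;

	/* bail out early if the qsd is stopping, the connection isn't ready,
	 * or reintegration hasn't completed yet */
	rc = qsd_ready(lqe, &lockh);
	if (rc)
		return rc;

	/* lockh now holds the remote global lock handle to be packed in the
	 * quota request; the actual RPC path is omitted here */
	return 0;
}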
Example #4
0
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
		void *ea, size_t ealen, struct ptlrpc_request **request)
{
	struct list_head cancels = LIST_HEAD_INIT(cancels);
        struct ptlrpc_request *req;
        struct mdc_rpc_lock *rpc_lock;
        struct obd_device *obd = exp->exp_obd;
        int count = 0, rc;
        __u64 bits;
        ENTRY;

        LASSERT(op_data != NULL);

        bits = MDS_INODELOCK_UPDATE;
        if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
                bits |= MDS_INODELOCK_LOOKUP;
	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX, bits);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_SETATTR);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        rpc_lock = obd->u.cli.cl_rpc_lock;

        if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
                CDEBUG(D_INODE, "setting mtime "CFS_TIME_T
                       ", ctime "CFS_TIME_T"\n",
                       LTIME_S(op_data->op_attr.ia_mtime),
                       LTIME_S(op_data->op_attr.ia_ctime));
	mdc_setattr_pack(req, op_data, ea, ealen);

        ptlrpc_request_set_replen(req);

        rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
	if (rc == -ERESTARTSYS)
                rc = 0;

        *request = req;

	RETURN(rc);
}
Example #5
0
int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
             struct ptlrpc_request **request)
{
	struct list_head cancels = LIST_HEAD_INIT(cancels);
        struct ptlrpc_request *req;
        int count = 0, rc;
        ENTRY;

        if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
            (fid_is_sane(&op_data->op_fid2)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid2,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count += mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                 &cancels, LCK_EX,
                                                 MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);

	/* get SELinux policy info if any */
	rc = sptlrpc_get_sepol(req);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	req_capsule_set_size(&req->rq_pill, &RMF_SELINUX_POL, RCL_CLIENT,
			     strlen(req->rq_sepol) ?
			     strlen(req->rq_sepol) + 1 : 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        mdc_link_pack(req, op_data);
        ptlrpc_request_set_replen(req);

	rc = mdc_reint(req, LUSTRE_IMP_FULL);
        *request = req;
        if (rc == -ERESTARTSYS)
                rc = 0;

        RETURN(rc);
}
Example #6
0
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
               struct ptlrpc_request **request)
{
        CFS_LIST_HEAD(cancels);
        struct obd_device *obd = class_exp2obd(exp);
        struct ptlrpc_request *req = *request;
        int count = 0, rc;
        ENTRY;

        LASSERT(req == NULL);

	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)) &&
	    !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX,
						MDS_INODELOCK_UPDATE);
	if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
	    (fid_is_sane(&op_data->op_fid3)) &&
	    !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
		count += mdc_resource_get_unused(exp, &op_data->op_fid3,
						 &cancels, LCK_EX,
						 MDS_INODELOCK_FULL);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_UNLINK);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        mdc_unlink_pack(req, op_data);

        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
                             obd->u.cli.cl_max_mds_easize);
        req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
                             obd->u.cli.cl_max_mds_cookiesize);
        ptlrpc_request_set_replen(req);

        *request = req;

        rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
        if (rc == -ERESTARTSYS)
                rc = 0;
        RETURN(rc);
}
Example #7
0
/*
 * Send non-intent quota request to master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the acquire RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lockh  - is the lock handle associated with the request, passed on
 *                 to the completion callback
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_send_dqacq(const struct lu_env *env, struct obd_export *exp,
		   struct quota_body *qbody, bool sync,
		   qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		   struct lustre_handle *lockh, struct lquota_entry *lqe)
{
	struct ptlrpc_request	*req;
	struct quota_body	*req_qbody;
	struct qsd_async_args	*aa;
	int			 rc;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_QUOTA_DQACQ);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_resend = req->rq_no_delay = 1;
	req->rq_no_retry_einprogress = 1;
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, QUOTA_DQACQ);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	ptlrpc_request_set_replen(req);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = (void *)lqe;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, lockh);

	if (sync) {
		rc = ptlrpc_queue_wait(req);
		rc = qsd_dqacq_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		req->rq_interpret_reply = qsd_dqacq_interpret;
		ptlrpcd_add_req(req);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, lockh, NULL, lqe, rc);
	return rc;
}
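
/*
 * A minimal usage sketch, not part of the original source: it shows the
 * sync/async distinction described in the comment above. With sync == true
 * the completion callback has already run by the time the call returns; with
 * sync == false the request is handed to ptlrpcd. The wrapper name is
 * hypothetical and the completion callback is left to the caller.
 */
static int qsd_example_acquire_sync(const struct lu_env *env,
				    struct obd_export *exp,
				    struct quota_body *qbody,
				    qsd_req_completion_t completion,
				    struct qsd_qtype_info *qqi,
				    struct lustre_handle *lockh,
				    struct lquota_entry *lqe)
{
	/* synchronous acquire: wait for the reply and its interpretation */
	return qsd_send_dqacq(env, exp, qbody, true, completion, qqi,
			      lockh, lqe);
}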
Example #8
0
static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv	= &slice->cis_io->u.ci_data_version;
	struct osc_io		*oio	= cl2osc_io(env, slice);
	struct obdo		*oa	= &oio->oi_oa;
	struct osc_async_cbargs	*cbargs	= &oio->oi_cbarg;
	struct osc_object	*obj	= cl2osc(slice->cis_obj);
	struct lov_oinfo	*loi	= obj->oo_oinfo;
	struct obd_export	*exp	= osc_export(obj);
	struct ptlrpc_request	*req;
	struct ost_body		*body;
	struct osc_data_version_args *dva;
	int rc;

	ENTRY;
	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
	dva = ptlrpc_req_async_args(req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}
Example #9
0
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
	       struct ptlrpc_request **request)
{
	LIST_HEAD(cancels);
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req = *request;
	int count = 0, rc;

	LASSERT(!req);

	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX,
						MDS_INODELOCK_UPDATE);
	if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
	    (fid_is_sane(&op_data->op_fid3)))
		count += mdc_resource_get_unused(exp, &op_data->op_fid3,
						 &cancels, LCK_EX,
						 MDS_INODELOCK_FULL);
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_REINT_UNLINK);
	if (!req) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		return -ENOMEM;
	}
	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
			     op_data->op_namelen + 1);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_unlink_pack(req, op_data);

	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     obd->u.cli.cl_default_mds_easize);
	ptlrpc_request_set_replen(req);

	*request = req;

	rc = mdc_reint(req, LUSTRE_IMP_FULL);
	if (rc == -ERESTARTSYS)
		rc = 0;
	return rc;
}
Example #10
0
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
		void *ea, size_t ealen, struct ptlrpc_request **request)
{
	LIST_HEAD(cancels);
	struct ptlrpc_request *req;
	int count = 0, rc;
	__u64 bits;

	bits = MDS_INODELOCK_UPDATE;
	if (op_data->op_attr.ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
		bits |= MDS_INODELOCK_LOOKUP;
	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX, bits);
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_REINT_SETATTR);
	if (!req) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		return -ENOMEM;
	}
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
		       LTIME_S(op_data->op_attr.ia_mtime),
		       LTIME_S(op_data->op_attr.ia_ctime));
	mdc_setattr_pack(req, op_data, ea, ealen);

	ptlrpc_request_set_replen(req);

	rc = mdc_reint(req, LUSTRE_IMP_FULL);

	if (rc == -ERESTARTSYS)
		rc = 0;

	*request = req;

	return rc;
}
Example #11
0
int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
             struct ptlrpc_request **request)
{
        CFS_LIST_HEAD(cancels);
        struct obd_device *obd = exp->exp_obd;
        struct ptlrpc_request *req;
        int count = 0, rc;
        ENTRY;

        if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
            (fid_is_sane(&op_data->op_fid2)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid2,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count += mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                 &cancels, LCK_EX,
                                                 MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);

        rc = mdc_prep_elc_req(exp, req, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        mdc_link_pack(req, op_data);
        ptlrpc_request_set_replen(req);

        rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
        *request = req;
        if (rc == -ERESTARTSYS)
                rc = 0;

        RETURN(rc);
}
Example #12
0
int client_quota_check(struct obd_device *unused, struct obd_export *exp,
                       struct obd_quotactl *oqctl)
{
        struct client_obd       *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request   *req;
        struct obd_quotactl     *body;
        const struct req_format *rf;
        int                      ver, opc, rc;
        ENTRY;

        if (!strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDC_NAME)) {
                rf  = &RQF_MDS_QUOTACHECK;
                ver = LUSTRE_MDS_VERSION;
                opc = MDS_QUOTACHECK;
        } else if (!strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_OSC_NAME)) {
                rf  = &RQF_OST_QUOTACHECK;
                ver = LUSTRE_OST_VERSION;
                opc = OST_QUOTACHECK;
        } else {
                RETURN(-EINVAL);
        }

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), rf, ver, opc);
        if (req == NULL)
                RETURN(-ENOMEM);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
        *body = *oqctl;

        ptlrpc_request_set_replen(req);

        /* the next poll will find -ENODATA, which means quotacheck is
         * still in progress */
        cli->cl_qchk_stat = -ENODATA;
        rc = ptlrpc_queue_wait(req);
        if (rc)
                cli->cl_qchk_stat = rc;
        ptlrpc_req_finished(req);
        RETURN(rc);
}
Example #13
0
int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
                 struct obd_quotactl *oqctl)
{
        struct ptlrpc_request *req;
        struct obd_quotactl   *oqc;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                        &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
                                        OST_QUOTACTL);
        if (req == NULL)
                RETURN(-ENOMEM);

        oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
        *oqc = *oqctl;

        ptlrpc_request_set_replen(req);
        ptlrpc_at_set_req_timeout(req);
        req->rq_no_resend = 1;

        rc = ptlrpc_queue_wait(req);
        if (rc)
                CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);

        if (req->rq_repmsg &&
            (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
                *oqctl = *oqc;
        } else if (!rc) {
                CERROR("Can't unpack obd_quotactl\n");
                rc = -EPROTO;
        }
        ptlrpc_req_finished(req);

        RETURN(rc);
}
Example #14
0
int client_quota_adjust_qunit(struct obd_export *exp,
                              struct quota_adjust_qunit *oqaq,
                              struct lustre_quota_ctxt *qctxt,
                              struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct quota_adjust_qunit *oqa;
        int rc = 0;
        ENTRY;

        /* the client doesn't support this kind of operation, abort it */
        if (!(exp->exp_connect_flags & OBD_CONNECT_CHANGE_QS)) {
                CDEBUG(D_QUOTA, "osc: %s don't support change qunit size\n",
                       exp->exp_obd->obd_name);
                RETURN(rc);
        }
        if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_OSC_NAME))
                RETURN(-EINVAL);

        LASSERT(rqset);

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                        &RQF_OST_QUOTA_ADJUST_QUNIT,
                                        LUSTRE_OST_VERSION,
                                        OST_QUOTA_ADJUST_QUNIT);
        if (req == NULL)
                RETURN(-ENOMEM);

        oqa = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_ADJUST_QUNIT);
        *oqa = *oqaq;

        ptlrpc_request_set_replen(req);

        ptlrpc_set_add_req(rqset, req);
        RETURN (rc);
}
Example #15
0
static int seq_client_rpc(struct lu_client_seq *seq,
                          struct lu_seq_range *output, __u32 opc,
                          const char *opcname)
{
	struct obd_export     *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range   *out, *in;
	__u32                 *op;
	unsigned int           debug_mask;
	int                    rc;
	ENTRY;

	LASSERT(exp != NULL && !IS_ERR(exp));
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	lu_seq_range_init(in);

	ptlrpc_request_set_replen(req);

	in->lsr_index = seq->lcs_space.lsr_index;
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		fld_range_set_mdt(in);
	else
		fld_range_set_ost(in);

	if (opc == SEQ_ALLOC_SUPER) {
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
		req->rq_reply_portal = MDC_REPLY_PORTAL;
		/* While allocating a super sequence for a data object, the
		 * current thread might hold the export of MDT0 (MDT0 is
		 * precreating objects on this OST), and it will send the
		 * request to MDT0 here, so we cannot keep resending the
		 * request; otherwise, if MDT0 fails (is umounted), it cannot
		 * release the export of MDT0 */
		if (seq->lcs_type == LUSTRE_SEQ_DATA)
			req->rq_no_delay = req->rq_no_resend = 1;
		debug_mask = D_CONSOLE;
	} else {
		if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
			req->rq_reply_portal = MDC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_METADATA_PORTAL;
		} else {
			req->rq_reply_portal = OSC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_DATA_PORTAL;
		}

		debug_mask = D_INFO;
	}

	/* Allow seq client RPC during recovery time. */
	req->rq_allow_replay = 1;

	ptlrpc_at_set_req_timeout(req);

	rc = ptlrpc_queue_wait(req);

	if (rc)
		GOTO(out_req, rc);

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!lu_seq_range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	if (lu_seq_range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"\n",
		     seq->lcs_name, opcname, PRANGE(output));

	EXIT;
out_req:
	ptlrpc_req_finished(req);
	return rc;
}
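
/*
 * A minimal usage sketch, not part of the original source: allocating a new
 * metadata sequence range would roughly reduce to the wrapper below. The
 * wrapper name is hypothetical; SEQ_ALLOC_META and the "meta" label follow
 * the opcodes used elsewhere in these examples.
 */
static int seq_example_alloc_meta(struct lu_client_seq *seq,
				  struct lu_seq_range *range)
{
	/* ask the sequence server for a regular metadata sequence range */
	return seq_client_rpc(seq, range, SEQ_ALLOC_META, "meta");
}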
Example #16
0
static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
                             struct ll_fiemap_info_key *fmkey,
                             struct fiemap *fiemap, size_t *buflen)
{
    struct obd_export *exp = osc_export(cl2osc(obj));
    struct ldlm_res_id resid;
    union ldlm_policy_data policy;
    struct lustre_handle lockh;
    enum ldlm_mode mode = LCK_MINMODE;
    struct ptlrpc_request *req;
    struct fiemap *reply;
    char *tmp;
    int rc;
    ENTRY;

    fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
    if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
        goto skip_locking;

    policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;

    if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
            fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
        policy.l_extent.end = OBD_OBJECT_EOF;
    else
        policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
                               fmkey->lfik_fiemap.fm_length +
                               PAGE_SIZE - 1) & PAGE_MASK;

    ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
    mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
                           LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
                           &resid, LDLM_EXTENT, &policy,
                           LCK_PR | LCK_PW, &lockh, 0);
    if (mode) { /* lock is cached on client */
        if (mode != LCK_PR) {
            ldlm_lock_addref(&lockh, LCK_PR);
            ldlm_lock_decref(&lockh, LCK_PW);
        }
    } else { /* no cached lock, need to acquire the lock on the server side */
        fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
        fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
    }

skip_locking:
    req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                               &RQF_OST_GET_INFO_FIEMAP);
    if (req == NULL)
        GOTO(drop_lock, rc = -ENOMEM);

    req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
                         sizeof(*fmkey));
    req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
                         *buflen);
    req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
                         *buflen);

    rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
    if (rc != 0) {
        ptlrpc_request_free(req);
        GOTO(drop_lock, rc);
    }
    tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
    memcpy(tmp, fmkey, sizeof(*fmkey));
    tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
    memcpy(tmp, fiemap, *buflen);
    ptlrpc_request_set_replen(req);

    rc = ptlrpc_queue_wait(req);
    if (rc != 0)
        GOTO(fini_req, rc);

    reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
    if (reply == NULL)
        GOTO(fini_req, rc = -EPROTO);

    memcpy(fiemap, reply, *buflen);
fini_req:
    ptlrpc_req_finished(req);
drop_lock:
    if (mode)
        ldlm_lock_decref(&lockh, LCK_PR);
    RETURN(rc);
}
Example #17
0
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
               const void *data, int datalen, int mode, __u32 uid, __u32 gid,
               cfs_cap_t cap_effective, __u64 rdev,
               struct ptlrpc_request **request)
{
        struct ptlrpc_request *req;
        int level, rc;
        int count, resends = 0;
        struct obd_import *import = exp->exp_obd->u.cli.cl_import;
        int generation = import->imp_generation;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* In case the upper layer did not allocate the fid, do it now. */
        if (!fid_is_sane(&op_data->op_fid2)) {
                /*
                 * mdc_fid_alloc() may return errno 1 in case of switch to new
                 * sequence, handle this.
                 */
                rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
                if (rc < 0) {
                        CERROR("Can't alloc new fid, rc %d\n", rc);
                        RETURN(rc);
                }
        }

rebuild:
        count = 0;
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_CREATE_RMT_ACL);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             data && datalen ? datalen : 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        /*
         * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
         * tgt, for symlinks or lov MD data.
         */
        mdc_create_pack(req, op_data, data, datalen, mode, uid,
                        gid, cap_effective, rdev);

        ptlrpc_request_set_replen(req);

	/* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
	 * logic here */
	req->rq_no_retry_einprogress = 1;

        if (resends) {
                req->rq_generation_set = 1;
                req->rq_import_generation = generation;
                req->rq_sent = cfs_time_current_sec() + resends;
        }
        level = LUSTRE_IMP_FULL;
 resend:
        rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);

        /* Resend if we were told to. */
        if (rc == -ERESTARTSYS) {
                level = LUSTRE_IMP_RECOVER;
                goto resend;
        } else if (rc == -EINPROGRESS) {
                /* Retry create indefinitely until it succeeds or another
                 * error code is returned. */
                ptlrpc_req_finished(req);
                resends++;

                CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
                       exp->exp_obd->obd_name, resends,
                       PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));

                if (generation == import->imp_generation) {
                        goto rebuild;
                } else {
                        CDEBUG(D_HA, "resend cross eviction\n");
                        RETURN(-EIO);
                }
        } else if (rc == 0) {
                struct mdt_body *body;
                struct lustre_capa *capa;

                body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
                LASSERT(body);
                if (body->valid & OBD_MD_FLMDSCAPA) {
                        capa = req_capsule_server_get(&req->rq_pill,
                                                      &RMF_CAPA1);
                        if (capa == NULL)
                                rc = -EPROTO;
                }
        }

        *request = req;
        RETURN(rc);
}
Example #18
0
/*
 * Fetch a global or slave index from the QMT.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to issue the OBD_IDX_READ RPC
 * \param ii     - is the index information to be packed in the request
 *                 on success, the index information returned by the server
 *                 is copied there.
 * \param npages - is the number of pages in the pages array
 * \param pages  - is an array of @npages pages
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
		    struct idx_info *ii, unsigned int npages,
		    struct page **pages, bool *need_swab)
{
	struct ptlrpc_request	*req;
	struct idx_info		*req_ii;
	struct ptlrpc_bulk_desc *desc;
	int			 rc, i;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OBD_IDX_READ);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	/* allocate bulk descriptor */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
				    MDS_BULK_PORTAL);
	if (desc == NULL) {
		ptlrpc_request_free(req);
		RETURN(-ENOMEM);
	}

	/* req now owns desc and will free it when it gets freed */
	for (i = 0; i < npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);

	/* pack index information in request */
	req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
	*req_ii = *ii;

	ptlrpc_request_set_replen(req);

	/* send request to master and wait for RPC to complete */
	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
	if (rc < 0)
		GOTO(out, rc);
	else
		/* sptlrpc_cli_unwrap_bulk_read() returns the number of bytes
		 * transferred */
		rc = 0;

	req_ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
	*ii = *req_ii;

	*need_swab = ptlrpc_rep_need_swab(req);

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}
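
/*
 * A minimal usage sketch, not part of the original source: the caller owns
 * the page array, fills in the idx_info key, and then parses the returned
 * records. The wrapper name is hypothetical and error handling is trimmed
 * for brevity.
 */
static int qsd_example_fetch_one_page(const struct lu_env *env,
				      struct obd_export *exp,
				      struct idx_info *ii)
{
	struct page	*page;
	bool		 need_swab;
	int		 rc;

	page = alloc_page(GFP_NOFS);
	if (page == NULL)
		return -ENOMEM;

	/* fetch one page worth of index records from the master */
	rc = qsd_fetch_index(env, exp, ii, 1, &page, &need_swab);

	/* on success, *ii has been updated with the server's reply and the
	 * page holds the index records, possibly needing a byte swap */

	__free_page(page);
	return rc;
}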
Example #19
0
/*
 * Get intent per-ID lock or global-index lock from master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the intent RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous (pre-acquire)
 * \param it_op  - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lvb    - is the lvb associated with the lock and returned by the
 *                 server
 * \param arg    - is an opaque argument passed to the completion callback
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
		    struct quota_body *qbody, bool sync, int it_op,
		    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		    struct lquota_lvb *lvb, void *arg)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct ptlrpc_request	*req;
	struct qsd_async_args	*aa = NULL;
	struct ldlm_intent	*lit;
	struct quota_body	*req_qbody;
	__u64			 flags = LDLM_FL_HAS_INTENT;
	int			 rc;
	ENTRY;

	LASSERT(exp != NULL);
	LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

	memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LDLM_INTENT_QUOTA);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_retry_einprogress = 1;
	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
	lit->opc = (__u64)it_op;

	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(*lvb));
	ptlrpc_request_set_replen(req);

	switch(it_op) {
	case IT_QUOTA_CONN:
		/* build resource name associated with global index */
		fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with qqi pointer */
		memcpy(&qti->qti_einfo, &qsd_glb_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = qqi;

		/* don't cancel global lock on memory pressure */
		flags |= LDLM_FL_NO_LRU;
		break;
	case IT_QUOTA_DQACQ:
		/* build resource name associated with the per-ID quota lock */
		fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
					 &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with lqe pointer */
		memcpy(&qti->qti_einfo, &qsd_id_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = arg;
		break;
	default:
		LASSERTF(0, "invalid it_op %d", it_op);
	}

	/* build lock enqueue request */
	rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid, NULL,
			      &flags, (void *)lvb, sizeof(*lvb), LVB_T_LQUOTA,
			      &qti->qti_lockh, 1);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		GOTO(out, rc);
	}

	/* grab reference on backend structure for the new lock */
	switch(it_op) {
	case IT_QUOTA_CONN:
		/* grab reference on qqi for new lock */
#ifdef USE_LU_REF
	{
		struct ldlm_lock	*lock;

		lock = ldlm_handle2lock(&qti->qti_lockh);
		if (lock == NULL) {
			ptlrpc_req_finished(req);
			GOTO(out, rc = -ENOLCK);
		}
		lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
		LDLM_LOCK_PUT(lock);
	}
#endif
		qqi_getref(qqi);
		break;
	case IT_QUOTA_DQACQ:
		/* grab reference on lqe for new lock */
		lqe_getref((struct lquota_entry *)arg);
		/* all acquire/release requests are sent with the no_resend
		 * and no_delay flags */
		req->rq_no_resend = req->rq_no_delay = 1;
		break;
	default:
		break;
	}

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = arg;
	aa->aa_lvb = lvb;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

	if (sync) {
		/* send lock enqueue request and wait for completion */
		rc = ptlrpc_queue_wait(req);
		rc = qsd_intent_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		/* queue lock request and return */
		req->rq_interpret_reply = qsd_intent_interpret;
		ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
	return rc;
}
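
/*
 * A minimal usage sketch, not part of the original source: acquiring the
 * global index lock at connect time corresponds to the IT_QUOTA_CONN case
 * above, with qqi passed both to the completion callback and as the opaque
 * argument. The wrapper name is hypothetical and the completion callback is
 * left to the caller.
 */
static int qsd_example_connect_lock(const struct lu_env *env,
				    struct obd_export *exp,
				    struct quota_body *qbody,
				    qsd_req_completion_t completion,
				    struct qsd_qtype_info *qqi,
				    struct lquota_lvb *lvb)
{
	/* synchronous global-index lock enqueue against the quota master */
	return qsd_intent_lock(env, exp, qbody, true, IT_QUOTA_CONN,
			       completion, qqi, lvb, qqi);
}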
Example #20
0
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
		const void *data, size_t datalen,
		umode_t mode, uid_t uid, gid_t gid,
		cfs_cap_t cap_effective, __u64 rdev,
		struct ptlrpc_request **request)
{
        struct ptlrpc_request *req;
        int level, rc;
        int count, resends = 0;
        struct obd_import *import = exp->exp_obd->u.cli.cl_import;
        int generation = import->imp_generation;
	struct list_head cancels = LIST_HEAD_INIT(cancels);
        ENTRY;

	/* In case the upper layer did not allocate the fid, do it now. */
	if (!fid_is_sane(&op_data->op_fid2)) {
		/*
		 * mdc_fid_alloc() may return errno 1 in case of switch to new
		 * sequence, handle this.
		 */
		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
		if (rc < 0)
			RETURN(rc);
	}

rebuild:
        count = 0;
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_REINT_CREATE_ACL);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             data && datalen ? datalen : 0);

	req_capsule_set_size(&req->rq_pill, &RMF_FILE_SECCTX_NAME,
			     RCL_CLIENT, op_data->op_file_secctx_name != NULL ?
			     strlen(op_data->op_file_secctx_name) + 1 : 0);

	req_capsule_set_size(&req->rq_pill, &RMF_FILE_SECCTX, RCL_CLIENT,
			     op_data->op_file_secctx_size);

	/* get SELinux policy info if any */
	rc = sptlrpc_get_sepol(req);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	req_capsule_set_size(&req->rq_pill, &RMF_SELINUX_POL, RCL_CLIENT,
			     strlen(req->rq_sepol) ?
			     strlen(req->rq_sepol) + 1 : 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        /*
         * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
         * tgt, for symlinks or lov MD data.
         */
        mdc_create_pack(req, op_data, data, datalen, mode, uid,
                        gid, cap_effective, rdev);

        ptlrpc_request_set_replen(req);

	/* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
	 * logic here */
	req->rq_no_retry_einprogress = 1;

        if (resends) {
                req->rq_generation_set = 1;
                req->rq_import_generation = generation;
		req->rq_sent = ktime_get_real_seconds() + resends;
        }
        level = LUSTRE_IMP_FULL;
 resend:
	rc = mdc_reint(req, level);

        /* Resend if we were told to. */
        if (rc == -ERESTARTSYS) {
                level = LUSTRE_IMP_RECOVER;
                goto resend;
        } else if (rc == -EINPROGRESS) {
		/* Retry create indefinitely until it succeeds, another error
		 * code is returned, or we are interrupted. */
		ptlrpc_req_finished(req);
		if (generation == import->imp_generation) {
			if (signal_pending(current))
				RETURN(-EINTR);

			resends++;
			CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
			       exp->exp_obd->obd_name, resends,
			       PFID(&op_data->op_fid1),
			       PFID(&op_data->op_fid2));
			goto rebuild;
                } else {
                        CDEBUG(D_HA, "resend cross eviction\n");
                        RETURN(-EIO);
                }
        }

        *request = req;
        RETURN(rc);
}
Example #21
0
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
               const void *data, int datalen, int mode, __u32 uid, __u32 gid,
               cfs_cap_t cap_effective, __u64 rdev,
               struct ptlrpc_request **request)
{
        struct ptlrpc_request *req;
        int level, rc;
        int count = 0;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* In case the upper layer did not allocate the fid, do it now. */
        if (!fid_is_sane(&op_data->op_fid2)) {
                /*
                 * mdc_fid_alloc() may return errno 1 in case of switch to new
                 * sequence, handle this.
                 */
                rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
                if (rc < 0) {
                        CERROR("Can't alloc new fid, rc %d\n", rc);
                        RETURN(rc);
                }
        }

        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_CREATE_RMT_ACL);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             data && datalen ? datalen : 0);

        rc = mdc_prep_elc_req(exp, req, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /*
         * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
         * tgt, for symlinks or lov MD data.
         */
        mdc_create_pack(req, op_data, data, datalen, mode, uid,
                        gid, cap_effective, rdev);

        ptlrpc_request_set_replen(req);

        level = LUSTRE_IMP_FULL;
 resend:
        rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);

        /* Resend if we were told to. */
        if (rc == -ERESTARTSYS) {
                level = LUSTRE_IMP_RECOVER;
                goto resend;
        } else if (rc == 0) {
                struct mdt_body *body;
                struct lustre_capa *capa;

                body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
                LASSERT(body);
                if (body->valid & OBD_MD_FLMDSCAPA) {
                        capa = req_capsule_server_get(&req->rq_pill,
                                                      &RMF_CAPA1);
                        if (capa == NULL)
                                rc = -EPROTO;
                }
        }

        *request = req;
        RETURN(rc);
}
Example #22
0
/* If mdc_setattr is called with an 'iattr', then it is a normal RPC that
 * should take the normal semaphore and go to the normal portal.
 *
 * If it is called with iattr->ia_valid & ATTR_FROM_OPEN, then it is a
 * magic open-path setattr that should take the setattr semaphore and
 * go to the setattr portal. */
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
                void *ea, int ealen, void *ea2, int ea2len,
                struct ptlrpc_request **request, struct md_open_data **mod)
{
        CFS_LIST_HEAD(cancels);
        struct ptlrpc_request *req;
        struct mdc_rpc_lock *rpc_lock;
        struct obd_device *obd = exp->exp_obd;
        int count = 0, rc;
        __u64 bits;
        ENTRY;

        LASSERT(op_data != NULL);

        bits = MDS_INODELOCK_UPDATE;
        if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
                bits |= MDS_INODELOCK_LOOKUP;
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX, bits);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_SETATTR);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
                req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
                                     0);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
        req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
                             ea2len);

        rc = mdc_prep_elc_req(exp, req, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (op_data->op_attr.ia_valid & ATTR_FROM_OPEN) {
                req->rq_request_portal = MDS_SETATTR_PORTAL;
                ptlrpc_at_set_req_timeout(req);
                rpc_lock = obd->u.cli.cl_setattr_lock;
        } else {
                rpc_lock = obd->u.cli.cl_rpc_lock;
        }

        if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
                CDEBUG(D_INODE, "setting mtime "CFS_TIME_T
                       ", ctime "CFS_TIME_T"\n",
                       LTIME_S(op_data->op_attr.ia_mtime),
                       LTIME_S(op_data->op_attr.ia_ctime));
        mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);

        ptlrpc_request_set_replen(req);
        if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
            req->rq_import->imp_replayable)
        {
                LASSERT(*mod == NULL);

                *mod = obd_mod_alloc();
                if (*mod == NULL) {
                        DEBUG_REQ(D_ERROR, req, "Can't allocate "
                                  "md_open_data");
                } else {
                        req->rq_replay = 1;
                        req->rq_cb_data = *mod;
                        (*mod)->mod_open_req = req;
                        req->rq_commit_cb = mdc_commit_open;
                        /**
                         * Take an extra reference on \var mod; it protects
                         * \var mod from being freed on eviction (the commit
                         * callback is called despite the rq_replay flag).
                         * It will be put in mdc_done_writing().
                         */
                        obd_mod_get(*mod);
                }
        }

        rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);

        /* Save the obtained info in the original RPC for the replay case. */
        if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
                struct mdt_ioepoch *epoch;
                struct mdt_body  *body;

                epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
                body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
                LASSERT(epoch != NULL);
                LASSERT(body != NULL);
                epoch->handle = body->handle;
                epoch->ioepoch = body->ioepoch;
                req->rq_replay_cb = mdc_replay_open;
        /** bug 3633: open may be committed and an ESTALE answer is not an error */
        } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
                rc = 0;
        } else if (rc == -ERESTARTSYS) {
                rc = 0;
        }
        *request = req;
        if (rc && req->rq_commit_cb) {
                /* Put an extra reference on \var mod on error case. */
                obd_mod_put(*mod);
                req->rq_commit_cb(req);
        }
        RETURN(rc);
}
Example #23
0
static int seq_client_rpc(struct lu_client_seq *seq,
                          struct lu_seq_range *output, __u32 opc,
                          const char *opcname)
{
        struct obd_export     *exp = seq->lcs_exp;
        struct ptlrpc_request *req;
        struct lu_seq_range   *out, *in;
        __u32                 *op;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
                                        LUSTRE_MDS_VERSION, SEQ_QUERY);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* Init operation code */
        op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
        *op = opc;

        /* Zero out input range, this is not recovery yet. */
        in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
        range_init(in);

        ptlrpc_request_set_replen(req);

        if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
                req->rq_request_portal = SEQ_METADATA_PORTAL;
                in->lsr_flags = LU_SEQ_RANGE_MDT;
        } else {
                LASSERTF(seq->lcs_type == LUSTRE_SEQ_DATA,
                         "unknown lcs_type %u\n", seq->lcs_type);
                req->rq_request_portal = SEQ_DATA_PORTAL;
                in->lsr_flags = LU_SEQ_RANGE_OST;
        }

        if (opc == SEQ_ALLOC_SUPER) {
                /* Update index field of *in, it is required for
                 * FLD update on super sequence allocator node. */
                in->lsr_index = seq->lcs_space.lsr_index;
                req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
        } else {
                LASSERTF(opc == SEQ_ALLOC_META,
                         "unknown opcode %u\n", opc);
        }

        ptlrpc_at_set_req_timeout(req);

        mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
        rc = ptlrpc_queue_wait(req);
        mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);

        if (rc)
                GOTO(out_req, rc);

        out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
        *output = *out;

        if (!range_is_sane(output)) {
                CERROR("%s: Invalid range received from server: "
                       DRANGE"\n", seq->lcs_name, PRANGE(output));
                GOTO(out_req, rc = -EINVAL);
        }

        if (range_is_exhausted(output)) {
                CERROR("%s: Range received from server is exhausted: "
                       DRANGE"\n", seq->lcs_name, PRANGE(output));
                GOTO(out_req, rc = -EINVAL);
        }

        CDEBUG(D_INFO, "%s: Allocated %s-sequence "DRANGE"\n",
               seq->lcs_name, opcname, PRANGE(output));

        EXIT;
out_req:
        ptlrpc_req_finished(req);
        return rc;
}