Example #1
int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
             struct ptlrpc_request **request)
{
	struct list_head cancels = LIST_HEAD_INIT(cancels);
        struct ptlrpc_request *req;
        int count = 0, rc;
        ENTRY;

        if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
            (fid_is_sane(&op_data->op_fid2)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid2,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count += mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                 &cancels, LCK_EX,
                                                 MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);

	/* get SELinux policy info if any */
	rc = sptlrpc_get_sepol(req);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	req_capsule_set_size(&req->rq_pill, &RMF_SELINUX_POL, RCL_CLIENT,
			     strlen(req->rq_sepol) ?
			     strlen(req->rq_sepol) + 1 : 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        mdc_link_pack(req, op_data);
        ptlrpc_request_set_replen(req);

	rc = mdc_reint(req, LUSTRE_IMP_FULL);
        *request = req;
        if (rc == -ERESTARTSYS)
                rc = 0;

        RETURN(rc);
}
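
All of these examples follow the same ptlrpc request lifecycle: allocate the
request from a format descriptor, size any variable-length fields, pack it,
declare the reply length, send it, and release it. The sketch below is not
taken from the original sources; it distills that pattern using only calls
that appear in the examples, with the OST_STATFS format chosen purely for
illustration.

/*
 * Hedged sketch: the bare request lifecycle shared by the examples in this
 * section. Assumes a valid import "imp".
 */
static int example_sync_rpc(struct obd_import *imp)
{
	struct ptlrpc_request *req;
	int rc;

	/* 1. allocate the request from a format descriptor */
	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
	if (req == NULL)
		return -ENOMEM;

	/* 2. pack the request buffers; variable-size fields would be sized
	 * with req_capsule_set_size() before this call */
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		/* a request that failed to pack is released with
		 * ptlrpc_request_free() */
		ptlrpc_request_free(req);
		return rc;
	}

	/* 3. declare the expected reply length */
	ptlrpc_request_set_replen(req);

	/* 4. send synchronously and wait for the reply */
	rc = ptlrpc_queue_wait(req);

	/* 5. a packed request is released with ptlrpc_req_finished() */
	ptlrpc_req_finished(req);
	return rc;
}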
Example #2
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
		void *ea, size_t ealen, struct ptlrpc_request **request)
{
	struct list_head cancels = LIST_HEAD_INIT(cancels);
        struct ptlrpc_request *req;
        struct mdc_rpc_lock *rpc_lock;
        struct obd_device *obd = exp->exp_obd;
        int count = 0, rc;
        __u64 bits;
        ENTRY;

        LASSERT(op_data != NULL);

        bits = MDS_INODELOCK_UPDATE;
        if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
                bits |= MDS_INODELOCK_LOOKUP;
	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX, bits);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_SETATTR);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        rpc_lock = obd->u.cli.cl_rpc_lock;

        if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
                CDEBUG(D_INODE, "setting mtime "CFS_TIME_T
                       ", ctime "CFS_TIME_T"\n",
                       LTIME_S(op_data->op_attr.ia_mtime),
                       LTIME_S(op_data->op_attr.ia_ctime));
	mdc_setattr_pack(req, op_data, ea, ealen);

        ptlrpc_request_set_replen(req);

        rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
	if (rc == -ERESTARTSYS)
                rc = 0;

        *request = req;

	RETURN(rc);
}
Example #3
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
               struct ptlrpc_request **request)
{
        CFS_LIST_HEAD(cancels);
        struct obd_device *obd = class_exp2obd(exp);
        struct ptlrpc_request *req = *request;
        int count = 0, rc;
        ENTRY;

        LASSERT(req == NULL);

	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)) &&
	    !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX,
						MDS_INODELOCK_UPDATE);
	if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
	    (fid_is_sane(&op_data->op_fid3)) &&
	    !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
		count += mdc_resource_get_unused(exp, &op_data->op_fid3,
						 &cancels, LCK_EX,
						 MDS_INODELOCK_FULL);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_UNLINK);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        mdc_unlink_pack(req, op_data);

        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
                             obd->u.cli.cl_max_mds_easize);
        req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
                             obd->u.cli.cl_max_mds_cookiesize);
        ptlrpc_request_set_replen(req);

        *request = req;

        rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
        if (rc == -ERESTARTSYS)
                rc = 0;
        RETURN(rc);
}
Example #4
/*
 * Send non-intent quota request to master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the acquire RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lqe    - is the qid entry to be processed
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_send_dqacq(const struct lu_env *env, struct obd_export *exp,
		   struct quota_body *qbody, bool sync,
		   qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		   struct lustre_handle *lockh, struct lquota_entry *lqe)
{
	struct ptlrpc_request	*req;
	struct quota_body	*req_qbody;
	struct qsd_async_args	*aa;
	int			 rc;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_QUOTA_DQACQ);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_resend = req->rq_no_delay = 1;
	req->rq_no_retry_einprogress = 1;
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, QUOTA_DQACQ);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	ptlrpc_request_set_replen(req);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = (void *)lqe;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, lockh);

	if (sync) {
		rc = ptlrpc_queue_wait(req);
		rc = qsd_dqacq_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		req->rq_interpret_reply = qsd_dqacq_interpret;
		ptlrpcd_add_req(req);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, lockh, NULL, lqe, rc);
	return rc;
}
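
The doc comment above spells out the parameters of qsd_send_dqacq(). The
sketch below is a minimal, hedged illustration of a synchronous caller, not
code from the original file; the helper name and the way the quota body,
lock handle and completion callback are obtained are assumptions.

/*
 * Hedged caller sketch for qsd_send_dqacq(); all arguments are assumed to
 * have been prepared by the (hypothetical) caller.
 */
static int example_dqacq_sync(const struct lu_env *env, struct obd_export *exp,
			      struct qsd_qtype_info *qqi,
			      struct lquota_entry *lqe,
			      struct quota_body *qbody,
			      struct lustre_handle *lockh,
			      qsd_req_completion_t completion)
{
	/* sync == true: the RPC is sent with ptlrpc_queue_wait() and the
	 * completion callback has already run when this returns */
	return qsd_send_dqacq(env, exp, qbody, true, completion, qqi,
			      lockh, lqe);
}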
Example #5
static int osc_io_data_version_start(const struct lu_env *env,
				     const struct cl_io_slice *slice)
{
	struct cl_data_version_io *dv	= &slice->cis_io->u.ci_data_version;
	struct osc_io		*oio	= cl2osc_io(env, slice);
	struct obdo		*oa	= &oio->oi_oa;
	struct osc_async_cbargs	*cbargs	= &oio->oi_cbarg;
	struct osc_object	*obj	= cl2osc(slice->cis_obj);
	struct lov_oinfo	*loi	= obj->oo_oinfo;
	struct obd_export	*exp	= osc_export(obj);
	struct ptlrpc_request	*req;
	struct ost_body		*body;
	struct osc_data_version_args *dva;
	int rc;

	ENTRY;
	memset(oa, 0, sizeof(*oa));
	oa->o_oi = loi->loi_oi;
	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;

	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
		oa->o_flags |= OBD_FL_SRVLOCK;
		if (dv->dv_flags & LL_DV_WR_FLUSH)
			oa->o_flags |= OBD_FL_FLUSH;
	}

	init_completion(&cbargs->opc_sync);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_data_version_interpret;
	CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
	dva = ptlrpc_req_async_args(req);
	dva->dva_oio = oio;

	ptlrpcd_add_req(req);

	RETURN(0);
}
Example #6
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
		void *ea, size_t ealen, struct ptlrpc_request **request)
{
	LIST_HEAD(cancels);
	struct ptlrpc_request *req;
	int count = 0, rc;
	__u64 bits;

	bits = MDS_INODELOCK_UPDATE;
	if (op_data->op_attr.ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
		bits |= MDS_INODELOCK_LOOKUP;
	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX, bits);
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_REINT_SETATTR);
	if (!req) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		return -ENOMEM;
	}
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
		       LTIME_S(op_data->op_attr.ia_mtime),
		       LTIME_S(op_data->op_attr.ia_ctime));
	mdc_setattr_pack(req, op_data, ea, ealen);

	ptlrpc_request_set_replen(req);

	rc = mdc_reint(req, LUSTRE_IMP_FULL);

	if (rc == -ERESTARTSYS)
		rc = 0;

	*request = req;

	return rc;
}
Example #7
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
	       struct ptlrpc_request **request)
{
	LIST_HEAD(cancels);
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req = *request;
	int count = 0, rc;

	LASSERT(!req);

	if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
	    (fid_is_sane(&op_data->op_fid1)))
		count = mdc_resource_get_unused(exp, &op_data->op_fid1,
						&cancels, LCK_EX,
						MDS_INODELOCK_UPDATE);
	if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
	    (fid_is_sane(&op_data->op_fid3)))
		count += mdc_resource_get_unused(exp, &op_data->op_fid3,
						 &cancels, LCK_EX,
						 MDS_INODELOCK_FULL);
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_REINT_UNLINK);
	if (!req) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		return -ENOMEM;
	}
	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
			     op_data->op_namelen + 1);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_unlink_pack(req, op_data);

	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     obd->u.cli.cl_default_mds_easize);
	ptlrpc_request_set_replen(req);

	*request = req;

	rc = mdc_reint(req, LUSTRE_IMP_FULL);
	if (rc == -ERESTARTSYS)
		rc = 0;
	return rc;
}
Example #8
int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
             struct ptlrpc_request **request)
{
        CFS_LIST_HEAD(cancels);
        struct obd_device *obd = exp->exp_obd;
        struct ptlrpc_request *req;
        int count = 0, rc;
        ENTRY;

        if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
            (fid_is_sane(&op_data->op_fid2)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid2,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count += mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                 &cancels, LCK_EX,
                                                 MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);

        rc = mdc_prep_elc_req(exp, req, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        mdc_link_pack(req, op_data);
        ptlrpc_request_set_replen(req);

        rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
        *request = req;
        if (rc == -ERESTARTSYS)
                rc = 0;

        RETURN(rc);
}
Example #9
static int osp_statfs_update(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	union ptlrpc_async_args	*aa;
	int			 rc;

	ENTRY;

	CDEBUG(D_CACHE, "going to update statfs\n");

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
	aa = ptlrpc_req_async_args(req);
	aa->pointer_arg[0] = d;

	/*
	 * no updates till reply
	 */
	cfs_timer_disarm(&d->opd_statfs_timer);
	d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
	d->opd_statfs_update_in_progress = 1;

	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);

	RETURN(0);
}
Example #10
/**
 * asks OST to clean precreate orphans
 * and gets next id for new objects
 */
static int osp_precreate_cleanup_orphans(struct lu_env *env,
					 struct osp_device *d)
{
	struct osp_thread_info	*osi = osp_env_info(env);
	struct lu_fid		*last_fid = &osi->osi_fid;
	struct ptlrpc_request	*req = NULL;
	struct obd_import	*imp;
	struct ost_body		*body;
	struct l_wait_info	 lwi = { 0 };
	int			 update_status = 0;
	int			 rc;
	int			 diff;

	ENTRY;

	/*
	 * Wait for local recovery to finish, so we can clean up orphans.
	 * Orphans are all objects since "last used" (assigned), but
	 * there might be objects reserved and in some cases they won't
	 * be used. We can't clean them up till we're sure they won't be
	 * used. Nor can we allow new reservations, because they may end
	 * up getting orphans that are cleaned up below. So we block new
	 * reservations and wait till all reserved objects are either
	 * used or released.
	 */
	spin_lock(&d->opd_pre_lock);
	d->opd_pre_recovering = 1;
	spin_unlock(&d->opd_pre_lock);
	/*
	 * The locking above makes sure the opd_pre_reserved check below will
	 * catch all osp_precreate_reserve() calls that see
	 * "!opd_pre_recovering".
	 */
	l_wait_event(d->opd_pre_waitq,
		     (!d->opd_pre_reserved && d->opd_recovery_completed) ||
		     !osp_precreate_running(d) || d->opd_got_disconnected,
		     &lwi);
	if (!osp_precreate_running(d) || d->opd_got_disconnected)
		GOTO(out, rc = -EAGAIN);

	CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
	       d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));

	*last_fid = d->opd_last_used_fid;
	/* The OSP should already get the valid seq now */
	LASSERT(!fid_is_zero(last_fid));
	if (fid_oid(&d->opd_last_used_fid) < 2) {
		/* lastfid looks strange... ask OST */
		rc = osp_get_lastfid_from_ost(env, d);
		if (rc)
			GOTO(out, rc);
	}

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		req = NULL;
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	body->oa.o_flags = OBD_FL_DELORPHAN;
	body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;

	fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);

	ptlrpc_request_set_replen(req);

	/* Don't resend the delorphan req */
	req->rq_no_resend = req->rq_no_delay = 1;

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		update_status = 1;
		GOTO(out, rc);
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	/*
	 * The OST provides us with the id the new pool starts from in
	 * body->oa.o_id.
	 */
	ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);

	spin_lock(&d->opd_pre_lock);
	diff = lu_fid_diff(&d->opd_last_used_fid, last_fid);
	if (diff > 0) {
		d->opd_pre_grow_count = OST_MIN_PRECREATE + diff;
		d->opd_pre_last_created_fid = d->opd_last_used_fid;
	} else {
		d->opd_pre_grow_count = OST_MIN_PRECREATE;
		d->opd_pre_last_created_fid = *last_fid;
	}
	/*
	 * This empties the pre-creation pool and effectively blocks any new
	 * reservations.
	 */
	LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
		LUSTRE_DATA_SEQ_MAX_WIDTH);
	d->opd_pre_used_fid = d->opd_pre_last_created_fid;
	d->opd_pre_grow_slow = 0;
	spin_unlock(&d->opd_pre_lock);

	CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
	       "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
	       PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
out:
	if (req)
		ptlrpc_req_finished(req);

	spin_lock(&d->opd_pre_lock);
	d->opd_pre_recovering = 0;
	spin_unlock(&d->opd_pre_lock);

	/*
	 * If rc is zero, the pre-creation window should have been emptied.
	 * Since waking up the herd would be useless without pre-created
	 * objects, we defer the signal to osp_precreate_send() in that case.
	 */
	if (rc != 0) {
		if (update_status) {
			CERROR("%s: cannot cleanup orphans: rc = %d\n",
			       d->opd_obd->obd_name, rc);
			/* we can't proceed from here, the OST seems to
			 * be in bad shape; better to wait for a new
			 * instance of the server and repeat from the
			 * beginning. notify possible waiters that this
			 * OSP isn't quite functional yet */
			osp_pre_update_status(d, rc);
		} else {
			wake_up(&d->opd_pre_user_waitq);
		}
	}

	RETURN(rc);
}
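
A hedged sketch of how a caller (e.g. the precreate thread) might drive this
function follows; the retry-while-EAGAIN loop is an illustration under that
assumption, not the actual thread body.

/*
 * Hedged caller sketch: -EAGAIN means the precreate thread was stopped or
 * the import got disconnected while waiting, so the (hypothetical) caller
 * retries as long as precreation is still running.
 */
static void example_cleanup_orphans(struct lu_env *env, struct osp_device *d)
{
	int rc;

	do {
		rc = osp_precreate_cleanup_orphans(env, d);
	} while (rc == -EAGAIN && osp_precreate_running(d));
}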
Example #11
static int osp_get_lastfid_from_ost(const struct lu_env *env,
				    struct osp_device *d)
{
	struct ptlrpc_request	*req = NULL;
	struct obd_import	*imp;
	struct lu_fid		*last_fid;
	char			*tmp;
	int			rc;
	ENTRY;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
			     sizeof(KEY_LAST_FID));

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
	memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));

	req->rq_no_delay = req->rq_no_resend = 1;
	last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
	fid_cpu_to_le(last_fid, &d->opd_last_used_fid);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* bad OST - let the sysadmin sort this out */
		if (rc == -ENOTSUPP) {
			CERROR("%s: server does not support FID: rc = %d\n",
			       d->opd_obd->obd_name, -ENOTSUPP);
		}
		ptlrpc_set_import_active(imp, 0);
		GOTO(out, rc);
	}

	last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
	if (last_fid == NULL) {
		CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
		GOTO(out, rc = -EPROTO);
	}

	if (!fid_is_sane(last_fid)) {
		CERROR("%s: Got insane last_fid "DFID"\n",
		       d->opd_obd->obd_name, PFID(last_fid));
		GOTO(out, rc = -EPROTO);
	}

	/* Only update the last used fid if the OST has objects for
	 * this sequence, i.e. fid_oid > 0 */
	if (fid_oid(last_fid) > 0)
		d->opd_last_used_fid = *last_fid;

	CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
	       PFID(last_fid));

out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}
Example #12
static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
{
	struct osp_thread_info	*oti = osp_env_info(env);
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	struct ost_body		*body;
	int			 rc, grow, diff;
	struct lu_fid		*fid = &oti->osi_fid;
	ENTRY;

	/* don't precreate new objects till the OST is healthy and has
	 * free space */
	if (unlikely(d->opd_pre_status)) {
		CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
		       d->opd_obd->obd_name, d->opd_pre_status);
		RETURN(0);
	}

	/*
	 * if the connection/initialization is not completed, ignore
	 */
	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		RETURN(-ENOMEM);
	req->rq_request_portal = OST_CREATE_PORTAL;
	/* we should not resend the create request - the delorphan will
	 * kill these objects anyway */
	req->rq_no_delay = req->rq_no_resend = 1;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	spin_lock(&d->opd_pre_lock);
	if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
		d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
	grow = d->opd_pre_grow_count;
	spin_unlock(&d->opd_pre_lock);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);

	*fid = d->opd_pre_last_created_fid;
	rc = osp_precreate_fids(env, d, fid, &grow);
	if (rc == 1) {
		/* Current seq has been used up */
		if (!osp_is_fid_client(d)) {
			osp_pre_update_status(d, -ENOSPC);
			rc = -ENOSPC;
		}
		wake_up(&d->opd_pre_waitq);
		GOTO(out_req, rc);
	}

	if (!osp_is_fid_client(d)) {
		/* Non-FID client will always send seq 0 because of
		 * compatibility */
		LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
		fid->f_seq = 0;
	}

	fid_to_ostid(fid, &body->oa.o_oi);
	body->oa.o_valid = OBD_MD_FLGROUP;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
		       rc);
		GOTO(out_req, rc);
	}
	LASSERT(req->rq_transno == 0);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
	LASSERTF(lu_fid_diff(fid, &d->opd_pre_used_fid) > 0,
		 "reply fid "DFID" pre used fid "DFID"\n", PFID(fid),
		 PFID(&d->opd_pre_used_fid));

	diff = lu_fid_diff(fid, &d->opd_pre_last_created_fid);

	spin_lock(&d->opd_pre_lock);
	if (diff < grow) {
		/* the OST has not managed to create all the
		 * objects we asked for */
		d->opd_pre_grow_count = max(diff, OST_MIN_PRECREATE);
		d->opd_pre_grow_slow = 1;
	} else {
		/* the OST is able to keep up with the work,
		 * we could consider increasing grow_count
		 * next time if needed */
		d->opd_pre_grow_slow = 0;
	}

	d->opd_pre_last_created_fid = *fid;
	spin_unlock(&d->opd_pre_lock);

	CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
	       d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
	       PFID(&d->opd_pre_last_created_fid));
out_req:
	/* now we can wake up all users waiting for objects */
	osp_pre_update_status(d, rc);
	wake_up(&d->opd_pre_user_waitq);

	ptlrpc_req_finished(req);
	RETURN(rc);
}
Example #13
int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
			__u64 size)
{
	struct osp_device	*d = lu2osp_dev(dt->do_lu.lo_dev);
	struct ptlrpc_request	*req = NULL;
	struct obd_import	*imp;
	struct ost_body		*body;
	struct obdo		*oa = NULL;
	int			 rc;

	ENTRY;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* XXX: capa support? */
	/* osc_set_capa_size(req, &RMF_CAPA1, capa); */
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/*
	 * XXX: decide what to do here about resend:
	 * if we don't resend, the client may see a wrong file size;
	 * if we do resend, the MDS thread can get stuck for quite a long time
	 */
	req->rq_no_resend = req->rq_no_delay = 1;

	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	OBD_ALLOC_PTR(oa);
	if (oa == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
	LASSERT(rc == 0);
	oa->o_size = size;
	oa->o_blocks = OBD_OBJECT_EOF;
	oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
		      OBD_MD_FLID | OBD_MD_FLGROUP;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	/* XXX: capa support? */
	/* osc_pack_capa(req, body, capa); */

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		CERROR("can't punch object: %d\n", rc);
out:
	ptlrpc_req_finished(req);
	if (oa)
		OBD_FREE_PTR(oa);
	RETURN(rc);
}
Example #14
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
{
	struct ptlrpc_cli_ctx	*ctx = &gctx->gc_base;
	struct obd_import	*imp = ctx->cc_sec->ps_import;
	struct ptlrpc_request	*req;
	struct ptlrpc_user_desc	*pud;
	int			 rc;
	ENTRY;

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
		CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
		       "don't send destroy rpc\n", ctx,
		       ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
		RETURN(0);
	}

	might_sleep();

	CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
	      sec_is_reverse(ctx->cc_sec) ?
	      "server finishing reverse" : "client finishing forward",
	      ctx, gss_handle_to_u64(&gctx->gc_handle),
	      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;

        req = ptlrpc_request_alloc(imp, &RQF_SEC_CTX);
        if (req == NULL) {
                CWARN("ctx %p(%u): fail to prepare rpc, destroy locally\n",
                      ctx, ctx->cc_vcred.vc_uid);
                GOTO(out, rc = -ENOMEM);
        }

        rc = ptlrpc_request_bufs_pack(req, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
                                      NULL, ctx);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out_ref, rc);
        }

        /* fix the user desc */
        if (req->rq_pack_udesc) {
                /* we rely on the fact that this request is in AUTH mode,
                 * and the user_desc is at offset 2. */
                pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
                LASSERT(pud);
                pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
                pud->pud_gid = pud->pud_fsgid = ctx->cc_vcred.vc_gid;
                pud->pud_cap = 0;
                pud->pud_ngroups = 0;
        }

        req->rq_phase = RQ_PHASE_RPC;
        rc = ptl_send_rpc(req, 1);
        if (rc)
                CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
                      ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);

out_ref:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}
Example #15
static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
                             struct ll_fiemap_info_key *fmkey,
                             struct fiemap *fiemap, size_t *buflen)
{
    struct obd_export *exp = osc_export(cl2osc(obj));
    struct ldlm_res_id resid;
    union ldlm_policy_data policy;
    struct lustre_handle lockh;
    enum ldlm_mode mode = LCK_MINMODE;
    struct ptlrpc_request *req;
    struct fiemap *reply;
    char *tmp;
    int rc;
    ENTRY;

    fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
    if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
        goto skip_locking;

    policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;

    if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
            fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
        policy.l_extent.end = OBD_OBJECT_EOF;
    else
        policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
                               fmkey->lfik_fiemap.fm_length +
                               PAGE_SIZE - 1) & PAGE_MASK;

    ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
    mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
                           LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
                           &resid, LDLM_EXTENT, &policy,
                           LCK_PR | LCK_PW, &lockh, 0);
    if (mode) { /* lock is cached on client */
        if (mode != LCK_PR) {
            ldlm_lock_addref(&lockh, LCK_PR);
            ldlm_lock_decref(&lockh, LCK_PW);
        }
    } else { /* no cached lock, needs acquire lock on server side */
        fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
        fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
    }

skip_locking:
    req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                               &RQF_OST_GET_INFO_FIEMAP);
    if (req == NULL)
        GOTO(drop_lock, rc = -ENOMEM);

    req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
                         sizeof(*fmkey));
    req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
                         *buflen);
    req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
                         *buflen);

    rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
    if (rc != 0) {
        ptlrpc_request_free(req);
        GOTO(drop_lock, rc);
    }
    tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
    memcpy(tmp, fmkey, sizeof(*fmkey));
    tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
    memcpy(tmp, fiemap, *buflen);
    ptlrpc_request_set_replen(req);

    rc = ptlrpc_queue_wait(req);
    if (rc != 0)
        GOTO(fini_req, rc);

    reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
    if (reply == NULL)
        GOTO(fini_req, rc = -EPROTO);

    memcpy(fiemap, reply, *buflen);
fini_req:
    ptlrpc_req_finished(req);
drop_lock:
    if (mode)
        ldlm_lock_decref(&lockh, LCK_PR);
    RETURN(rc);
}
Example #16
/*
 * Get intent per-ID lock or global-index lock from master.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to send the intent RPC
 * \param qbody  - quota body to be packed in request
 * \param sync   - synchronous or asynchronous (pre-acquire)
 * \param it_op  - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - completion callback
 * \param qqi    - is the qsd_qtype_info structure to pass to the completion
 *                 function
 * \param lvb    - is the lvb associated with the lock and returned by the
 *                 server
 * \param arg    - is an opaq argument passed to the completion callback
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
		    struct quota_body *qbody, bool sync, int it_op,
		    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		    struct lquota_lvb *lvb, void *arg)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct ptlrpc_request	*req;
	struct qsd_async_args	*aa = NULL;
	struct ldlm_intent	*lit;
	struct quota_body	*req_qbody;
	__u64			 flags = LDLM_FL_HAS_INTENT;
	int			 rc;
	ENTRY;

	LASSERT(exp != NULL);
	LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

	memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LDLM_INTENT_QUOTA);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_retry_einprogress = 1;
	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
	lit->opc = (__u64)it_op;

	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(*lvb));
	ptlrpc_request_set_replen(req);

	switch(it_op) {
	case IT_QUOTA_CONN:
		/* build resource name associated with global index */
		fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with qqi pointer */
		memcpy(&qti->qti_einfo, &qsd_glb_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = qqi;

		/* don't cancel global lock on memory pressure */
		flags |= LDLM_FL_NO_LRU;
		break;
	case IT_QUOTA_DQACQ:
		/* build resource name associated for per-ID quota lock */
		fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
					 &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with lqe pointer */
		memcpy(&qti->qti_einfo, &qsd_id_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = arg;
		break;
	default:
		LASSERTF(0, "invalid it_op %d", it_op);
	}

	/* build lock enqueue request */
	rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid, NULL,
			      &flags, (void *)lvb, sizeof(*lvb), LVB_T_LQUOTA,
			      &qti->qti_lockh, 1);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		GOTO(out, rc);
	}

	/* grab reference on backend structure for the new lock */
	switch(it_op) {
	case IT_QUOTA_CONN:
		/* grab reference on qqi for new lock */
#ifdef USE_LU_REF
	{
		struct ldlm_lock	*lock;

		lock = ldlm_handle2lock(&qti->qti_lockh);
		if (lock == NULL) {
			ptlrpc_req_finished(req);
			GOTO(out, rc = -ENOLCK);
		}
		lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
		LDLM_LOCK_PUT(lock);
	}
#endif
		qqi_getref(qqi);
		break;
	case IT_QUOTA_DQACQ:
		/* grab reference on lqe for new lock */
		lqe_getref((struct lquota_entry *)arg);
		/* all acquire/release requests are sent with the no_resend
		 * and no_delay flags */
		req->rq_no_resend = req->rq_no_delay = 1;
		break;
	default:
		break;
	}

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = arg;
	aa->aa_lvb = lvb;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

	if (sync) {
		/* send lock enqueue request and wait for completion */
		rc = ptlrpc_queue_wait(req);
		rc = qsd_intent_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		/* queue lock request and return */
		req->rq_interpret_reply = qsd_intent_interpret;
		ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
	return rc;
}
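
As with qsd_send_dqacq() above, a minimal hedged caller sketch follows; the
argument values (a synchronous IT_QUOTA_CONN enqueue with a NULL opaque
argument) are assumptions chosen purely for illustration.

/*
 * Hedged caller sketch for qsd_intent_lock(): acquire the global-index lock
 * at connect time and wait for the enqueue to complete.
 */
static int example_quota_glb_lock(const struct lu_env *env,
				  struct obd_export *exp,
				  struct qsd_qtype_info *qqi,
				  struct quota_body *qbody,
				  struct lquota_lvb *lvb,
				  qsd_req_completion_t completion)
{
	/* qbody->qb_lockh must not be in use yet (see the LASSERT above);
	 * the completion callback receives qqi and the returned lvb */
	return qsd_intent_lock(env, exp, qbody, true, IT_QUOTA_CONN,
			       completion, qqi, lvb, NULL);
}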
Example #17
/*
 * Fetch a global or slave index from the QMT.
 *
 * \param env    - the environment passed by the caller
 * \param exp    - is the export to use to issue the OBD_IDX_READ RPC
 * \param ii     - is the index information to be packed in the request
 *                 on success, the index information returned by the server
 *                 is copied there.
 * \param npages - is the number of pages in the pages array
 * \param pages  - is an array of @npages pages
 *
 * \retval 0     - success
 * \retval -ve   - appropriate errors
 */
int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
		    struct idx_info *ii, unsigned int npages,
		    struct page **pages, bool *need_swab)
{
	struct ptlrpc_request	*req;
	struct idx_info		*req_ii;
	struct ptlrpc_bulk_desc *desc;
	int			 rc, i;
	ENTRY;

	LASSERT(exp);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OBD_IDX_READ);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	/* allocate bulk descriptor */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
				    MDS_BULK_PORTAL);
	if (desc == NULL) {
		ptlrpc_request_free(req);
		RETURN(-ENOMEM);
	}

	/* req now owns desc and will free it when it gets freed */
	for (i = 0; i < npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);

	/* pack index information in request */
	req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
	*req_ii = *ii;

	ptlrpc_request_set_replen(req);

	/* send request to master and wait for RPC to complete */
	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
	if (rc < 0)
		GOTO(out, rc);
	else
		/* sptlrpc_cli_unwrap_bulk_read() returns the number of bytes
		 * transferred */
		rc = 0;

	req_ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
	*ii = *req_ii;

	*need_swab = ptlrpc_rep_need_swab(req);

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}
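
A hedged caller sketch follows; the page count, the way idx_info is prepared
and the cleanup path are assumptions made only to show how the pages array
and the need_swab flag are used.

/*
 * Hedged caller sketch for qsd_fetch_index() with a small, locally
 * allocated page array.
 */
static int example_fetch_index(const struct lu_env *env,
			       struct obd_export *exp, struct idx_info *ii)
{
	struct page	*pages[4] = { NULL };
	bool		 need_swab;
	int		 i, rc;

	for (i = 0; i < 4; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (pages[i] == NULL) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/* on success *ii is overwritten with the index information returned
	 * by the server and need_swab tells the caller whether the bulk
	 * data must be byte-swapped */
	rc = qsd_fetch_index(env, exp, ii, 4, pages, &need_swab);
out:
	for (i = 0; i < 4; i++)
		if (pages[i] != NULL)
			__free_page(pages[i]);
	return rc;
}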
Example #18
/* This is a callback from the llog_* functions.
 * Assumes caller has already pushed us into the kernel context.
 */
static int llog_client_open(const struct lu_env *env,
			    struct llog_handle *lgh, struct llog_logid *logid,
			    char *name, enum llog_open_param open_param)
{
	struct obd_import *imp;
	struct llogd_body *body;
	struct llog_ctxt *ctxt = lgh->lgh_ctxt;
	struct ptlrpc_request *req = NULL;
	int rc;

	LLOG_CLIENT_ENTRY(ctxt, imp);

	/* client cannot create llog */
	LASSERTF(open_param != LLOG_OPEN_NEW, "%#x\n", open_param);
	LASSERT(lgh);

	req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	if (name)
		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
				     strlen(name) + 1);

	rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
				 LLOG_ORIGIN_HANDLE_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		req = NULL;
		goto out;
	}
	ptlrpc_request_set_replen(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (logid)
		body->lgd_logid = *logid;
	body->lgd_ctxt_idx = ctxt->loc_idx - 1;

	if (name) {
		char *tmp;

		tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME,
						   strlen(name) + 1);
		LASSERT(tmp);
		strcpy(tmp, name);
	}

	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (!body) {
		rc = -EFAULT;
		goto out;
	}

	lgh->lgh_id = body->lgd_logid;
	lgh->lgh_ctxt = ctxt;
out:
	LLOG_CLIENT_EXIT(ctxt, imp);
	ptlrpc_req_finished(req);
	return rc;
}
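
llog_client_open() is registered as an llog operation rather than called
directly. The sketch below is an assumption about how the generic llog layer
would reach it; llog_open(), llog_close() and LLOG_OPEN_EXISTS are not part
of this file and are shown only as an illustration of that path.

/*
 * Hedged sketch (assumed call path): open a named remote llog through the
 * generic llog API, which dispatches to the client-side handler above.
 */
static int example_open_remote_llog(const struct lu_env *env,
				    struct llog_ctxt *ctxt, char *name)
{
	struct llog_handle *llh = NULL;
	int rc;

	rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
	if (rc)
		return rc;

	/* ... process the log records ... */

	return llog_close(env, llh);
}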
Example #19
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
               const void *data, int datalen, int mode, __u32 uid, __u32 gid,
               cfs_cap_t cap_effective, __u64 rdev,
               struct ptlrpc_request **request)
{
        struct ptlrpc_request *req;
        int level, rc;
        int count, resends = 0;
        struct obd_import *import = exp->exp_obd->u.cli.cl_import;
        int generation = import->imp_generation;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* In case the upper layer did not allocate a fid, do it now. */
        if (!fid_is_sane(&op_data->op_fid2)) {
                /*
                 * mdc_fid_alloc() may return errno 1 in case of a switch to a
                 * new sequence; handle this.
                 */
                rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
                if (rc < 0) {
                        CERROR("Can't alloc new fid, rc %d\n", rc);
                        RETURN(rc);
                }
        }

rebuild:
        count = 0;
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_CREATE_RMT_ACL);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             data && datalen ? datalen : 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        /*
         * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
         * tgt, for symlinks or lov MD data.
         */
        mdc_create_pack(req, op_data, data, datalen, mode, uid,
                        gid, cap_effective, rdev);

        ptlrpc_request_set_replen(req);

	/* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
	 * logic here */
	req->rq_no_retry_einprogress = 1;

        if (resends) {
                req->rq_generation_set = 1;
                req->rq_import_generation = generation;
                req->rq_sent = cfs_time_current_sec() + resends;
        }
        level = LUSTRE_IMP_FULL;
 resend:
        rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);

        /* Resend if we were told to. */
        if (rc == -ERESTARTSYS) {
                level = LUSTRE_IMP_RECOVER;
                goto resend;
        } else if (rc == -EINPROGRESS) {
                /* Retry the create indefinitely until it succeeds or fails
                 * with another error code. */
                ptlrpc_req_finished(req);
                resends++;

                CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
                       exp->exp_obd->obd_name, resends,
                       PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));

                if (generation == import->imp_generation) {
                        goto rebuild;
                } else {
                        CDEBUG(D_HA, "resend cross eviction\n");
                        RETURN(-EIO);
                }
        } else if (rc == 0) {
                struct mdt_body *body;
                struct lustre_capa *capa;

                body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
                LASSERT(body);
                if (body->valid & OBD_MD_FLMDSCAPA) {
                        capa = req_capsule_server_get(&req->rq_pill,
                                                      &RMF_CAPA1);
                        if (capa == NULL)
                                rc = -EPROTO;
                }
        }

        *request = req;
        RETURN(rc);
}
Example #20
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
               const void *data, int datalen, int mode, __u32 uid, __u32 gid,
               cfs_cap_t cap_effective, __u64 rdev,
               struct ptlrpc_request **request)
{
        struct ptlrpc_request *req;
        int level, rc;
        int count = 0;
        CFS_LIST_HEAD(cancels);
        ENTRY;

        /* In case the upper layer did not allocate a fid, do it now. */
        if (!fid_is_sane(&op_data->op_fid2)) {
                /*
                 * mdc_fid_alloc() may return errno 1 in case of a switch to a
                 * new sequence; handle this.
                 */
                rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
                if (rc < 0) {
                        CERROR("Can't alloc new fid, rc %d\n", rc);
                        RETURN(rc);
                }
        }

        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_CREATE_RMT_ACL);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             data && datalen ? datalen : 0);

        rc = mdc_prep_elc_req(exp, req, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /*
         * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
         * tgt, for symlinks or lov MD data.
         */
        mdc_create_pack(req, op_data, data, datalen, mode, uid,
                        gid, cap_effective, rdev);

        ptlrpc_request_set_replen(req);

        level = LUSTRE_IMP_FULL;
 resend:
        rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);

        /* Resend if we were told to. */
        if (rc == -ERESTARTSYS) {
                level = LUSTRE_IMP_RECOVER;
                goto resend;
        } else if (rc == 0) {
                struct mdt_body *body;
                struct lustre_capa *capa;

                body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
                LASSERT(body);
                if (body->valid & OBD_MD_FLMDSCAPA) {
                        capa = req_capsule_server_get(&req->rq_pill,
                                                      &RMF_CAPA1);
                        if (capa == NULL)
                                rc = -EPROTO;
                }
        }

        *request = req;
        RETURN(rc);
}
Example #21
/* If mdc_setattr is called with an 'iattr', then it is a normal RPC that
 * should take the normal semaphore and go to the normal portal.
 *
 * If it is called with iattr->ia_valid & ATTR_FROM_OPEN, then it is a
 * magic open-path setattr that should take the setattr semaphore and
 * go to the setattr portal. */
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
                void *ea, int ealen, void *ea2, int ea2len,
                struct ptlrpc_request **request, struct md_open_data **mod)
{
        CFS_LIST_HEAD(cancels);
        struct ptlrpc_request *req;
        struct mdc_rpc_lock *rpc_lock;
        struct obd_device *obd = exp->exp_obd;
        int count = 0, rc;
        __u64 bits;
        ENTRY;

        LASSERT(op_data != NULL);

        bits = MDS_INODELOCK_UPDATE;
        if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
                bits |= MDS_INODELOCK_LOOKUP;
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX, bits);
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_MDS_REINT_SETATTR);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }
        mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
        if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
                req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
                                     0);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
        req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
                             ea2len);

        rc = mdc_prep_elc_req(exp, req, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (op_data->op_attr.ia_valid & ATTR_FROM_OPEN) {
                req->rq_request_portal = MDS_SETATTR_PORTAL;
                ptlrpc_at_set_req_timeout(req);
                rpc_lock = obd->u.cli.cl_setattr_lock;
        } else {
                rpc_lock = obd->u.cli.cl_rpc_lock;
        }

        if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
                CDEBUG(D_INODE, "setting mtime "CFS_TIME_T
                       ", ctime "CFS_TIME_T"\n",
                       LTIME_S(op_data->op_attr.ia_mtime),
                       LTIME_S(op_data->op_attr.ia_ctime));
        mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);

        ptlrpc_request_set_replen(req);
        if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
            req->rq_import->imp_replayable)
        {
                LASSERT(*mod == NULL);

                *mod = obd_mod_alloc();
                if (*mod == NULL) {
                        DEBUG_REQ(D_ERROR, req, "Can't allocate "
                                  "md_open_data");
                } else {
                        req->rq_replay = 1;
                        req->rq_cb_data = *mod;
                        (*mod)->mod_open_req = req;
                        req->rq_commit_cb = mdc_commit_open;
                        /**
                         * Take an extra reference on \var mod, it protects \var
                         * mod from being freed on eviction (commit callback is
                         * called despite rq_replay flag).
                         * Will be put on mdc_done_writing().
                         */
                        obd_mod_get(*mod);
                }
        }

        rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);

        /* Save the obtained info in the original RPC for the replay case. */
        if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
                struct mdt_ioepoch *epoch;
                struct mdt_body  *body;

                epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
                body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
                LASSERT(epoch != NULL);
                LASSERT(body != NULL);
                epoch->handle = body->handle;
                epoch->ioepoch = body->ioepoch;
                req->rq_replay_cb = mdc_replay_open;
        /** bug 3633: the open may be committed and an ESTALE answer is not an error */
        } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
                rc = 0;
        } else if (rc == -ERESTARTSYS) {
                rc = 0;
        }
        *request = req;
        if (rc && req->rq_commit_cb) {
                /* Put an extra reference on \var mod on error case. */
                obd_mod_put(*mod);
                req->rq_commit_cb(req);
        }
        RETURN(rc);
}
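
The comment at the top of this example describes the two paths through
mdc_setattr(). The hedged sketch below shows how a caller would select the
open-path variant; the helper name and the NULL extended-attribute buffers
are assumptions for illustration.

/*
 * Hedged caller sketch: ATTR_FROM_OPEN in op_data->op_attr.ia_valid makes
 * mdc_setattr() use MDS_SETATTR_PORTAL and cl_setattr_lock instead of the
 * regular portal and cl_rpc_lock.
 */
static int example_setattr_from_open(struct obd_export *exp,
				     struct md_op_data *op_data,
				     struct ptlrpc_request **request,
				     struct md_open_data **mod)
{
	op_data->op_attr.ia_valid |= ATTR_FROM_OPEN;

	return mdc_setattr(exp, op_data, NULL, 0, NULL, 0, request, mod);
}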
Example #22
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
		const void *data, size_t datalen,
		umode_t mode, uid_t uid, gid_t gid,
		cfs_cap_t cap_effective, __u64 rdev,
		struct ptlrpc_request **request)
{
        struct ptlrpc_request *req;
        int level, rc;
        int count, resends = 0;
        struct obd_import *import = exp->exp_obd->u.cli.cl_import;
        int generation = import->imp_generation;
	struct list_head cancels = LIST_HEAD_INIT(cancels);
        ENTRY;

	/* In case the upper layer did not allocate a fid, do it now. */
	if (!fid_is_sane(&op_data->op_fid2)) {
		/*
		 * mdc_fid_alloc() may return errno 1 in case of a switch to a
		 * new sequence; handle this.
		 */
		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
		if (rc < 0)
			RETURN(rc);
	}

rebuild:
        count = 0;
        if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
            (fid_is_sane(&op_data->op_fid1)))
                count = mdc_resource_get_unused(exp, &op_data->op_fid1,
                                                &cancels, LCK_EX,
                                                MDS_INODELOCK_UPDATE);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_REINT_CREATE_ACL);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                             op_data->op_namelen + 1);
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
                             data && datalen ? datalen : 0);

	req_capsule_set_size(&req->rq_pill, &RMF_FILE_SECCTX_NAME,
			     RCL_CLIENT, op_data->op_file_secctx_name != NULL ?
			     strlen(op_data->op_file_secctx_name) + 1 : 0);

	req_capsule_set_size(&req->rq_pill, &RMF_FILE_SECCTX, RCL_CLIENT,
			     op_data->op_file_secctx_size);

	/* get SELinux policy info if any */
	rc = sptlrpc_get_sepol(req);
	if (rc < 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	req_capsule_set_size(&req->rq_pill, &RMF_SELINUX_POL, RCL_CLIENT,
			     strlen(req->rq_sepol) ?
			     strlen(req->rq_sepol) + 1 : 0);

	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

        /*
         * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
         * tgt, for symlinks or lov MD data.
         */
        mdc_create_pack(req, op_data, data, datalen, mode, uid,
                        gid, cap_effective, rdev);

        ptlrpc_request_set_replen(req);

	/* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
	 * logic here */
	req->rq_no_retry_einprogress = 1;

        if (resends) {
                req->rq_generation_set = 1;
                req->rq_import_generation = generation;
		req->rq_sent = ktime_get_real_seconds() + resends;
        }
        level = LUSTRE_IMP_FULL;
 resend:
	rc = mdc_reint(req, level);

        /* Resend if we were told to. */
        if (rc == -ERESTARTSYS) {
                level = LUSTRE_IMP_RECOVER;
                goto resend;
        } else if (rc == -EINPROGRESS) {
		/* Retry the create indefinitely until it succeeds, fails
		 * with another error code, or is interrupted. */
		ptlrpc_req_finished(req);
		if (generation == import->imp_generation) {
			if (signal_pending(current))
				RETURN(-EINTR);

			resends++;
			CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
			       exp->exp_obd->obd_name, resends,
			       PFID(&op_data->op_fid1),
			       PFID(&op_data->op_fid2));
			goto rebuild;
                } else {
                        CDEBUG(D_HA, "resend cross eviction\n");
                        RETURN(-EIO);
                }
        }

        *request = req;
        RETURN(rc);
}