Example #1
0
/* Find and cancel locally the locks matched by inode @bits and @mode in the
 * resource found by @fid. Found locks are added to the @cancels list. Returns
 * the number of locks added to the @cancels list. */
int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
			    struct list_head *cancels, enum ldlm_mode mode,
			    __u64 bits)
{
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	union ldlm_policy_data policy = { {0} };
	struct ldlm_res_id res_id;
	struct ldlm_resource *res;
	int count;
	ENTRY;

	/* Return, i.e. cancel nothing, only if ELC is supported (flag in
	 * export) but disabled through procfs (flag in NS).
	 *
	 * This is distinct from the case where ELC is not supported at all,
	 * where we still want to cancel locks in advance and simply cancel
	 * them locally, without sending any RPC. */
	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
		RETURN(0);

	fid_build_reg_res_name(fid, &res_id);
	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
	if (IS_ERR(res))
		RETURN(0);
	LDLM_RESOURCE_ADDREF(res);
	/* Initialize ibits lock policy. */
	policy.l_inodebits.bits = bits;
	count = ldlm_cancel_resource_local(res, cancels, &policy,
					   mode, 0, 0, NULL);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);
	RETURN(count);
}
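
A minimal usage sketch (not from the original source): a hypothetical caller collects cancellable UPDATE-bit locks before packing its next metadata RPC. The helper name and its arguments (exp, fid, cancels) are assumed for illustration; only mdc_resource_get_unused() itself comes from the example above.

/* Hypothetical helper, only to illustrate the calling convention above. */
static int example_collect_update_cancels(struct obd_export *exp,
					  const struct lu_fid *fid,
					  struct list_head *cancels)
{
	/* gather local EX locks on the UPDATE ibit so they can be cancelled
	 * together with the next RPC instead of via separate cancel RPCs */
	return mdc_resource_get_unused(exp, fid, cancels, LCK_EX,
				       MDS_INODELOCK_UPDATE);
}

The returned count, together with the cancels list, would then be handed to the request-packing code so the early cancels travel with the request itself.
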
Example #2
0
static int mdt_rename_lock(struct mdt_thread_info *info,
                           struct lustre_handle *lh)
{
	struct ldlm_namespace	*ns = info->mti_mdt->mdt_namespace;
	ldlm_policy_data_t	*policy = &info->mti_policy;
	struct ldlm_res_id	*res_id = &info->mti_res_id;
	__u64			flags = 0;
	int rc = 0;
	ENTRY;

	fid_build_reg_res_name(&LUSTRE_BFL_FID, res_id);

	memset(policy, 0, sizeof *policy);
	policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 4, 53, 0)
	/* In phase I we do not support cross-MDT rename, so a local BFL lock
	 * is enough.
	 */
	flags = LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB;
	/*
	 * The current node is the controller, i.e. MDT0, where the BFL lock
	 * should be taken.
	 */
	rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, policy,
				    LCK_EX, &flags, ldlm_blocking_ast,
				    ldlm_completion_ast, NULL, NULL, 0,
				    LVB_T_NONE,
				    &info->mti_exp->exp_handle.h_cookie,
				    lh);
#else
#warning "Local rename lock is invalid for DNE phase II."
#endif
	RETURN(rc);
}
Example #3
0
static int mdt_rename_lock(struct mdt_thread_info *info,
                           struct lustre_handle *lh)
{
        struct ldlm_namespace *ns     = info->mti_mdt->mdt_namespace;
        ldlm_policy_data_t    *policy = &info->mti_policy;
        struct ldlm_res_id    *res_id = &info->mti_res_id;
        struct md_site        *ms;
        int rc;
        ENTRY;

        ms = mdt_md_site(info->mti_mdt);
        fid_build_reg_res_name(&LUSTRE_BFL_FID, res_id);

        memset(policy, 0, sizeof *policy);
        policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;

        if (ms->ms_control_exp == NULL) {
                int flags = LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB;

                /*
                 * The current node is the controller, i.e. MDT0, where the
                 * BFL lock should be taken.
                 */
                rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, policy,
                                            LCK_EX, &flags, ldlm_blocking_ast,
                                            ldlm_completion_ast, NULL, NULL, 0,
                                            &info->mti_exp->exp_handle.h_cookie,
                                            lh);
        } else {
                struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_EX,
                     ldlm_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
                int flags = 0;

                /*
                 * In this case MDT0 is a remote node, so issue a DLM lock
                 * request as other clients do.
                 */
                rc = ldlm_cli_enqueue(ms->ms_control_exp, NULL, &einfo, res_id,
                                      policy, &flags, NULL, 0, lh, 0);
        }

        RETURN(rc);
}
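
A minimal sketch, assuming the BFL taken by mdt_rename_lock() is later released with ldlm_lock_decref() in the same LCK_EX mode; the wrapper name and the rename step itself are placeholders, not part of the original code.

/* Hypothetical caller, for illustration only. */
static int example_rename_under_bfl(struct mdt_thread_info *info)
{
        struct lustre_handle lh = { 0 };
        int rc;

        rc = mdt_rename_lock(info, &lh);
        if (rc != 0)
                return rc;

        /* ... perform the rename while holding the BFL ... */

        /* drop the big filesystem lock in the mode it was taken (LCK_EX) */
        ldlm_lock_decref(&lh, LCK_EX);
        return rc;
}
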
Example #4
0
/**
 * Find an existing lock covering the page with the given index.
 * Copy of osc_obj_dlmlock_at_pgoff(), but for DoM IBITS locks.
 */
struct ldlm_lock *mdc_dlmlock_at_pgoff(const struct lu_env *env,
				       struct osc_object *obj, pgoff_t index,
				       enum osc_dap_flags dap_flags)
{
	struct osc_thread_info *info = osc_env_info(env);
	struct ldlm_res_id *resname = &info->oti_resname;
	union ldlm_policy_data *policy = &info->oti_policy;
	struct lustre_handle lockh;
	struct ldlm_lock *lock = NULL;
	enum ldlm_mode mode;
	__u64 flags;

	ENTRY;

	fid_build_reg_res_name(lu_object_fid(osc2lu(obj)), resname);
	mdc_lock_build_policy(env, policy);

	flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
	if (dap_flags & OSC_DAP_FL_TEST_LOCK)
		flags |= LDLM_FL_TEST_LOCK;

again:
	/* Search for an already existing lock that covers this page. If we
	 * are trying to read, we also search for an existing PW lock: the VFS
	 * and page cache already protect us locally, so many readers/writers
	 * can share a single PW lock. */
	mode = mdc_dom_lock_match(env, osc_export(obj), resname, LDLM_IBITS,
				  policy, LCK_PR | LCK_PW, &flags, obj, &lockh,
				  dap_flags & OSC_DAP_FL_CANCELING);
	if (mode != 0) {
		lock = ldlm_handle2lock(&lockh);
		/* RACE: the lock is cancelled so let's try again */
		if (unlikely(lock == NULL))
			goto again;
	}

	RETURN(lock);
}
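
A minimal sketch, assuming the usual LDLM reference rules: the lock returned by mdc_dlmlock_at_pgoff() carries the reference taken by ldlm_handle2lock(), which the caller drops with LDLM_LOCK_PUT(). The wrapper below and its bool return are purely illustrative.

/* Hypothetical check for a covering DoM lock, for illustration only. */
static bool example_page_is_covered(const struct lu_env *env,
				    struct osc_object *obj, pgoff_t index)
{
	struct ldlm_lock *lock;

	/* OSC_DAP_FL_TEST_LOCK: match without adding a reader/writer
	 * reference on the matched lock */
	lock = mdc_dlmlock_at_pgoff(env, obj, index, OSC_DAP_FL_TEST_LOCK);
	if (lock == NULL)
		return false;

	/* drop the reference taken by ldlm_handle2lock() during the lookup */
	LDLM_LOCK_PUT(lock);
	return true;
}
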
Example #5
0
/* Find and cancel locally the locks matched by inode @bits and @mode in the
 * resource found by @fid. Found locks are added to the @cancels list. Returns
 * the number of locks added to the @cancels list. */
int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
                            cfs_list_t *cancels, ldlm_mode_t mode,
                            __u64 bits)
{
        ldlm_policy_data_t policy = {{0}};
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        fid_build_reg_res_name(fid, &res_id);
        res = ldlm_resource_get(exp->exp_obd->obd_namespace,
                                NULL, &res_id, 0, 0);
        if (res == NULL)
                RETURN(0);
        LDLM_RESOURCE_ADDREF(res);
        /* Initialize ibits lock policy. */
        policy.l_inodebits.bits = bits;
        count = ldlm_cancel_resource_local(res, cancels, &policy,
                                           mode, 0, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}
Example #6
0
/*
 * Get an intent per-ID lock or global-index lock from the master.
 *
 * \param env        - the environment passed by the caller
 * \param exp        - the export to use to send the intent RPC
 * \param qbody      - the quota body to be packed in the request
 * \param sync       - whether the request is synchronous or asynchronous
 *                     (pre-acquire)
 * \param it_op      - IT_QUOTA_DQACQ or IT_QUOTA_CONN
 * \param completion - the completion callback
 * \param qqi        - the qsd_qtype_info structure to pass to the completion
 *                     function
 * \param lvb        - the lvb associated with the lock and returned by the
 *                     server
 * \param arg        - an opaque argument passed to the completion callback
 *
 * \retval 0         - success
 * \retval -ve       - appropriate errors
 */
int qsd_intent_lock(const struct lu_env *env, struct obd_export *exp,
		    struct quota_body *qbody, bool sync, int it_op,
		    qsd_req_completion_t completion, struct qsd_qtype_info *qqi,
		    struct lquota_lvb *lvb, void *arg)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct ptlrpc_request	*req;
	struct qsd_async_args	*aa = NULL;
	struct ldlm_intent	*lit;
	struct quota_body	*req_qbody;
	__u64			 flags = LDLM_FL_HAS_INTENT;
	int			 rc;
	ENTRY;

	LASSERT(exp != NULL);
	LASSERT(!lustre_handle_is_used(&qbody->qb_lockh));

	memset(&qti->qti_lockh, 0, sizeof(qti->qti_lockh));

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LDLM_INTENT_QUOTA);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req->rq_no_retry_einprogress = 1;
	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
	lit->opc = (__u64)it_op;

	req_qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
	*req_qbody = *qbody;

	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
			     sizeof(*lvb));
	ptlrpc_request_set_replen(req);

	switch (it_op) {
	case IT_QUOTA_CONN:
		/* build resource name associated with global index */
		fid_build_reg_res_name(&qbody->qb_fid, &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with qqi pointer */
		memcpy(&qti->qti_einfo, &qsd_glb_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = qqi;

		/* don't cancel global lock on memory pressure */
		flags |= LDLM_FL_NO_LRU;
		break;
	case IT_QUOTA_DQACQ:
		/* build resource name associated with the per-ID quota lock */
		fid_build_quota_res_name(&qbody->qb_fid, &qbody->qb_id,
					 &qti->qti_resid);

		/* copy einfo template and fill ei_cbdata with lqe pointer */
		memcpy(&qti->qti_einfo, &qsd_id_einfo, sizeof(qti->qti_einfo));
		qti->qti_einfo.ei_cbdata = arg;
		break;
	default:
		LASSERTF(0, "invalid it_op %d", it_op);
	}

	/* build lock enqueue request */
	rc = ldlm_cli_enqueue(exp, &req, &qti->qti_einfo, &qti->qti_resid, NULL,
			      &flags, (void *)lvb, sizeof(*lvb), LVB_T_LQUOTA,
			      &qti->qti_lockh, 1);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		GOTO(out, rc);
	}

	/* grab reference on backend structure for the new lock */
	switch (it_op) {
	case IT_QUOTA_CONN:
		/* grab reference on qqi for new lock */
#ifdef USE_LU_REF
	{
		struct ldlm_lock	*lock;

		lock = ldlm_handle2lock(&qti->qti_lockh);
		if (lock == NULL) {
			ptlrpc_req_finished(req);
			GOTO(out, rc = -ENOLCK);
		}
		lu_ref_add(&qqi->qqi_reference, "glb_lock", lock);
		LDLM_LOCK_PUT(lock);
	}
#endif
		qqi_getref(qqi);
		break;
	case IT_QUOTA_DQACQ:
		/* grab reference on lqe for new lock */
		lqe_getref((struct lquota_entry *)arg);
		/* all acquire/release requests are sent with the no_resend
		 * and no_delay flags */
		req->rq_no_resend = req->rq_no_delay = 1;
		break;
	default:
		break;
	}

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_exp = exp;
	aa->aa_qqi = qqi;
	aa->aa_arg = arg;
	aa->aa_lvb = lvb;
	aa->aa_completion = completion;
	lustre_handle_copy(&aa->aa_lockh, &qti->qti_lockh);

	if (sync) {
		/* send lock enqueue request and wait for completion */
		rc = ptlrpc_queue_wait(req);
		rc = qsd_intent_interpret(env, req, aa, rc);
		ptlrpc_req_finished(req);
	} else {
		/* queue lock request and return */
		req->rq_interpret_reply = qsd_intent_interpret;
		ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	}

	RETURN(rc);
out:
	completion(env, qqi, qbody, NULL, &qti->qti_lockh, lvb, arg, rc);
	return rc;
}
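
A minimal sketch of a completion callback; its prototype is only inferred from the call at the "out:" label above (env, qqi, request body, reply body, lock handle, lvb, opaque argument, result) and should be checked against the actual qsd_req_completion_t definition before being relied on.

/* Hypothetical completion callback, for illustration only. */
static void example_qsd_completion(const struct lu_env *env,
				   struct qsd_qtype_info *qqi,
				   struct quota_body *reqbody,
				   struct quota_body *repbody,
				   struct lustre_handle *lockh,
				   struct lquota_lvb *lvb, void *arg, int rc)
{
	if (rc != 0) {
		/* enqueue failed or was never sent: drop whatever references
		 * were pinned for this request (qqi or lquota_entry) */
		return;
	}

	/* on success, lockh identifies the granted per-ID or global lock and
	 * lvb holds the LVB returned by the server */
}
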